Diffstat (limited to 'lib')
-rw-r--r--  lib/ARCMigrate/ARCMT.cpp | 1
-rw-r--r--  lib/ARCMigrate/FileRemapper.cpp | 2
-rw-r--r--  lib/ARCMigrate/Transforms.cpp | 5
-rw-r--r--  lib/AST/APValue.cpp | 21
-rw-r--r--  lib/AST/ASTContext.cpp | 619
-rw-r--r--  lib/AST/ASTDiagnostic.cpp | 18
-rw-r--r--  lib/AST/ASTDumper.cpp | 58
-rw-r--r--  lib/AST/ASTImporter.cpp | 726
-rw-r--r--  lib/AST/ASTTypeTraits.cpp | 5
-rw-r--r--  lib/AST/AttrImpl.cpp | 3
-rw-r--r--  lib/AST/CXXABI.h | 9
-rw-r--r--  lib/AST/CXXInheritance.cpp | 1
-rw-r--r--  lib/AST/Comment.cpp | 157
-rw-r--r--  lib/AST/CommentBriefParser.cpp | 1
-rw-r--r--  lib/AST/CommentLexer.cpp | 10
-rw-r--r--  lib/AST/CommentParser.cpp | 2
-rw-r--r--  lib/AST/CommentSema.cpp | 11
-rw-r--r--  lib/AST/Decl.cpp | 164
-rw-r--r--  lib/AST/DeclBase.cpp | 96
-rw-r--r--  lib/AST/DeclCXX.cpp | 198
-rw-r--r--  lib/AST/DeclGroup.cpp | 3
-rw-r--r--  lib/AST/DeclObjC.cpp | 29
-rw-r--r--  lib/AST/DeclOpenMP.cpp | 13
-rw-r--r--  lib/AST/DeclPrinter.cpp | 237
-rw-r--r--  lib/AST/DeclTemplate.cpp | 109
-rw-r--r--  lib/AST/DeclarationName.cpp | 13
-rw-r--r--  lib/AST/Expr.cpp | 185
-rw-r--r--  lib/AST/ExprCXX.cpp | 101
-rw-r--r--  lib/AST/ExprClassification.cpp | 21
-rw-r--r--  lib/AST/ExprConstant.cpp | 1053
-rw-r--r--  lib/AST/ExprObjC.cpp | 2
-rw-r--r--  lib/AST/ItaniumCXXABI.cpp | 18
-rw-r--r--  lib/AST/ItaniumMangle.cpp | 163
-rw-r--r--  lib/AST/Mangle.cpp | 3
-rw-r--r--  lib/AST/MicrosoftCXXABI.cpp | 17
-rw-r--r--  lib/AST/MicrosoftMangle.cpp | 114
-rw-r--r--  lib/AST/NestedNameSpecifier.cpp | 13
-rw-r--r--  lib/AST/OpenMPClause.cpp | 127
-rw-r--r--  lib/AST/RawCommentList.cpp | 9
-rw-r--r--  lib/AST/Stmt.cpp | 30
-rw-r--r--  lib/AST/StmtCXX.cpp | 4
-rw-r--r--  lib/AST/StmtObjC.cpp | 4
-rw-r--r--  lib/AST/StmtOpenMP.cpp | 609
-rw-r--r--  lib/AST/StmtPrinter.cpp | 58
-rw-r--r--  lib/AST/StmtProfile.cpp | 58
-rw-r--r--  lib/AST/TemplateBase.cpp | 27
-rw-r--r--  lib/AST/Type.cpp | 136
-rw-r--r--  lib/AST/TypeLoc.cpp | 16
-rw-r--r--  lib/AST/TypePrinter.cpp | 31
-rw-r--r--  lib/AST/VTableBuilder.cpp | 230
-rw-r--r--  lib/ASTMatchers/ASTMatchFinder.cpp | 38
-rw-r--r--  lib/ASTMatchers/ASTMatchersInternal.cpp | 13
-rw-r--r--  lib/ASTMatchers/Dynamic/Marshallers.h | 69
-rw-r--r--  lib/ASTMatchers/Dynamic/Parser.cpp | 1
-rw-r--r--  lib/ASTMatchers/Dynamic/Registry.cpp | 61
-rw-r--r--  lib/Analysis/AnalysisDeclContext.cpp | 14
-rw-r--r--  lib/Analysis/CFG.cpp | 50
-rw-r--r--  lib/Analysis/CMakeLists.txt | 2
-rw-r--r--  lib/Analysis/CallGraph.cpp | 18
-rw-r--r--  lib/Analysis/CloneDetection.cpp | 894
-rw-r--r--  lib/Analysis/Consumed.cpp | 5
-rw-r--r--  lib/Analysis/FormatString.cpp | 27
-rw-r--r--  lib/Analysis/FormatStringParsing.h | 1
-rw-r--r--  lib/Analysis/LiveVariables.cpp | 27
-rw-r--r--  lib/Analysis/OSLog.cpp | 202
-rw-r--r--  lib/Analysis/PrintfFormatString.cpp | 43
-rw-r--r--  lib/Analysis/ReachableCode.cpp | 2
-rw-r--r--  lib/Analysis/ScanfFormatString.cpp | 6
-rw-r--r--  lib/Analysis/ThreadSafety.cpp | 8
-rw-r--r--  lib/Analysis/ThreadSafetyCommon.cpp | 9
-rw-r--r--  lib/Analysis/UninitializedValues.cpp | 3
-rw-r--r--  lib/Basic/CMakeLists.txt | 2
-rw-r--r--  lib/Basic/Diagnostic.cpp | 21
-rw-r--r--  lib/Basic/DiagnosticOptions.cpp | 2
-rw-r--r--  lib/Basic/FileManager.cpp | 24
-rw-r--r--  lib/Basic/FileSystemStatCache.cpp | 6
-rw-r--r--  lib/Basic/IdentifierTable.cpp | 18
-rw-r--r--  lib/Basic/LangOptions.cpp | 7
-rw-r--r--  lib/Basic/Module.cpp | 11
-rw-r--r--  lib/Basic/OpenMPKinds.cpp | 137
-rw-r--r--  lib/Basic/SourceLocation.cpp | 1
-rw-r--r--  lib/Basic/SourceManager.cpp | 40
-rw-r--r--  lib/Basic/TargetInfo.cpp | 48
-rw-r--r--  lib/Basic/Targets.cpp | 590
-rw-r--r--  lib/Basic/Version.cpp | 2
-rw-r--r--  lib/Basic/VirtualFileSystem.cpp | 134
-rw-r--r--  lib/CodeGen/ABIInfo.h | 2
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 390
-rw-r--r--  lib/CodeGen/CGAtomic.cpp | 6
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 152
-rw-r--r--  lib/CodeGen/CGBlocks.h | 17
-rw-r--r--  lib/CodeGen/CGBuilder.h | 18
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 1093
-rw-r--r--  lib/CodeGen/CGCUDABuiltin.cpp | 6
-rw-r--r--  lib/CodeGen/CGCUDANV.cpp | 92
-rw-r--r--  lib/CodeGen/CGCUDARuntime.cpp | 11
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 67
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp | 5
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 13
-rw-r--r--  lib/CodeGen/CGCall.cpp | 352
-rw-r--r--  lib/CodeGen/CGCall.h | 143
-rw-r--r--  lib/CodeGen/CGClass.cpp | 246
-rw-r--r--  lib/CodeGen/CGCleanup.cpp | 2
-rw-r--r--  lib/CodeGen/CGCleanup.h | 3
-rw-r--r--  lib/CodeGen/CGCoroutine.cpp | 116
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 593
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 54
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 122
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 9
-rw-r--r--  lib/CodeGen/CGException.cpp | 14
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 383
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 115
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 684
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 11
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 99
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 149
-rw-r--r--  lib/CodeGen/CGLoopInfo.cpp | 31
-rw-r--r--  lib/CodeGen/CGLoopInfo.h | 11
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 49
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 1059
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 2207
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp | 27
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 3
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.cpp | 38
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.h | 14
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.cpp | 613
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.h | 50
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | 5
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.h | 7
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 64
-rw-r--r--  lib/CodeGen/CGStmtOpenMP.cpp | 256
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 16
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 328
-rw-r--r--  lib/CodeGen/CGVTables.h | 33
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 7
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp | 133
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 143
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 177
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 607
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 97
-rw-r--r--  lib/CodeGen/CodeGenPGO.cpp | 4
-rw-r--r--  lib/CodeGen/CodeGenPGO.h | 2
-rw-r--r--  lib/CodeGen/CodeGenTypeCache.h | 5
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 24
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 10
-rw-r--r--  lib/CodeGen/ConstantBuilder.h | 444
-rw-r--r--  lib/CodeGen/CoverageMappingGen.cpp | 73
-rw-r--r--  lib/CodeGen/CoverageMappingGen.h | 1
-rw-r--r--  lib/CodeGen/EHScopeStack.h | 6
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 293
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp | 314
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp | 2
-rw-r--r--  lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 37
-rw-r--r--  lib/CodeGen/SanitizerMetadata.cpp | 8
-rw-r--r--  lib/CodeGen/SwiftCallingConv.cpp | 9
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 488
-rw-r--r--  lib/CodeGen/TargetInfo.h | 16
-rw-r--r--  lib/CodeGen/VarBypassDetector.cpp | 168
-rw-r--r--  lib/CodeGen/VarBypassDetector.h | 70
-rw-r--r--  lib/Driver/Action.cpp | 57
-rw-r--r--  lib/Driver/CMakeLists.txt | 1
-rw-r--r--  lib/Driver/Compilation.cpp | 17
-rw-r--r--  lib/Driver/CrossWindowsToolChain.cpp | 1
-rw-r--r--  lib/Driver/Distro.cpp | 134
-rw-r--r--  lib/Driver/Driver.cpp | 1921
-rw-r--r--  lib/Driver/Job.cpp | 141
-rw-r--r--  lib/Driver/MSVCToolChain.cpp | 105
-rw-r--r--  lib/Driver/Multilib.cpp | 2
-rw-r--r--  lib/Driver/SanitizerArgs.cpp | 67
-rw-r--r--  lib/Driver/Tool.cpp | 10
-rw-r--r--  lib/Driver/ToolChain.cpp | 194
-rw-r--r--  lib/Driver/ToolChains.cpp | 1133
-rw-r--r--  lib/Driver/ToolChains.h | 318
-rw-r--r--  lib/Driver/Tools.cpp | 1755
-rw-r--r--  lib/Driver/Tools.h | 46
-rw-r--r--  lib/Driver/Types.cpp | 116
-rw-r--r--  lib/Edit/RewriteObjCFoundationAPI.cpp | 2
-rw-r--r--  lib/Format/BreakableToken.cpp | 16
-rw-r--r--  lib/Format/CMakeLists.txt | 1
-rw-r--r--  lib/Format/Comments.cpp | 36
-rw-r--r--  lib/Format/Comments.h | 33
-rw-r--r--  lib/Format/ContinuationIndenter.cpp | 14
-rw-r--r--  lib/Format/Encoding.h | 29
-rw-r--r--  lib/Format/Format.cpp | 498
-rw-r--r--  lib/Format/FormatToken.cpp | 15
-rw-r--r--  lib/Format/FormatToken.h | 19
-rw-r--r--  lib/Format/FormatTokenLexer.cpp | 70
-rw-r--r--  lib/Format/FormatTokenLexer.h | 21
-rw-r--r--  lib/Format/SortJavaScriptImports.cpp | 62
-rw-r--r--  lib/Format/TokenAnalyzer.cpp | 16
-rw-r--r--  lib/Format/TokenAnalyzer.h | 7
-rw-r--r--  lib/Format/TokenAnnotator.cpp | 197
-rw-r--r--  lib/Format/TokenAnnotator.h | 1
-rw-r--r--  lib/Format/UnwrappedLineFormatter.cpp | 3
-rw-r--r--  lib/Format/UnwrappedLineFormatter.h | 2
-rw-r--r--  lib/Format/UnwrappedLineParser.cpp | 46
-rw-r--r--  lib/Format/WhitespaceManager.cpp | 15
-rw-r--r--  lib/Format/WhitespaceManager.h | 3
-rw-r--r--  lib/Frontend/ASTConsumers.cpp | 1
-rw-r--r--  lib/Frontend/ASTUnit.cpp | 13
-rw-r--r--  lib/Frontend/CacheTokens.cpp | 30
-rw-r--r--  lib/Frontend/ChainedIncludesSource.cpp | 3
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 228
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 211
-rw-r--r--  lib/Frontend/DependencyFile.cpp | 19
-rw-r--r--  lib/Frontend/DiagnosticRenderer.cpp | 2
-rw-r--r--  lib/Frontend/FrontendAction.cpp | 12
-rw-r--r--  lib/Frontend/FrontendActions.cpp | 116
-rw-r--r--  lib/Frontend/FrontendOptions.cpp | 2
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp | 7
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 82
-rw-r--r--  lib/Frontend/ModuleDependencyCollector.cpp | 66
-rw-r--r--  lib/Frontend/MultiplexConsumer.cpp | 6
-rw-r--r--  lib/Frontend/PCHContainerOperations.cpp | 7
-rw-r--r--  lib/Frontend/PrintPreprocessedOutput.cpp | 34
-rw-r--r--  lib/Frontend/Rewrite/FrontendActions.cpp | 3
-rw-r--r--  lib/Frontend/Rewrite/HTMLPrint.cpp | 5
-rw-r--r--  lib/Frontend/Rewrite/InclusionRewriter.cpp | 6
-rw-r--r--  lib/Frontend/Rewrite/RewriteModernObjC.cpp | 33
-rw-r--r--  lib/Frontend/Rewrite/RewriteObjC.cpp | 36
-rw-r--r--  lib/Frontend/Rewrite/RewriteTest.cpp | 4
-rw-r--r--  lib/Frontend/SerializedDiagnosticPrinter.cpp | 5
-rw-r--r--  lib/Frontend/SerializedDiagnosticReader.cpp | 17
-rw-r--r--  lib/Frontend/TextDiagnostic.cpp | 37
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 2
-rw-r--r--  lib/Frontend/VerifyDiagnosticConsumer.cpp | 3
-rw-r--r--  lib/FrontendTool/ExecuteCompilerInvocation.cpp | 10
-rw-r--r--  lib/Headers/CMakeLists.txt | 18
-rw-r--r--  lib/Headers/__clang_cuda_builtin_vars.h (renamed from lib/Headers/cuda_builtin_vars.h) | 0
-rw-r--r--  lib/Headers/__clang_cuda_cmath.h | 341
-rw-r--r--  lib/Headers/__clang_cuda_complex_builtins.h | 203
-rw-r--r--  lib/Headers/__clang_cuda_math_forward_declares.h | 25
-rw-r--r--  lib/Headers/__clang_cuda_runtime_wrapper.h | 46
-rw-r--r--  lib/Headers/__wmmintrin_aes.h | 12
-rw-r--r--  lib/Headers/__wmmintrin_pclmul.h | 15
-rw-r--r--  lib/Headers/altivec.h | 2274
-rw-r--r--  lib/Headers/ammintrin.h | 77
-rw-r--r--  lib/Headers/armintr.h | 45
-rw-r--r--  lib/Headers/avx512bwintrin.h | 428
-rw-r--r--  lib/Headers/avx512dqintrin.h | 437
-rw-r--r--  lib/Headers/avx512fintrin.h | 2346
-rw-r--r--  lib/Headers/avx512vlbwintrin.h | 1822
-rw-r--r--  lib/Headers/avx512vldqintrin.h | 439
-rw-r--r--  lib/Headers/avx512vlintrin.h | 2470
-rw-r--r--  lib/Headers/avxintrin.h | 3096
-rw-r--r--  lib/Headers/bmiintrin.h | 64
-rw-r--r--  lib/Headers/cuda_wrappers/algorithm | 96
-rw-r--r--  lib/Headers/cuda_wrappers/complex | 82
-rw-r--r--  lib/Headers/cuda_wrappers/new | 47
-rw-r--r--  lib/Headers/emmintrin.h | 2546
-rw-r--r--  lib/Headers/f16cintrin.h | 28
-rw-r--r--  lib/Headers/float.h | 7
-rw-r--r--  lib/Headers/fxsrintrin.h | 62
-rw-r--r--  lib/Headers/ia32intrin.h | 6
-rw-r--r--  lib/Headers/immintrin.h | 35
-rw-r--r--  lib/Headers/intrin.h | 682
-rw-r--r--  lib/Headers/lzcntintrin.h | 50
-rw-r--r--  lib/Headers/mmintrin.h | 174
-rw-r--r--  lib/Headers/module.modulemap | 2
-rw-r--r--  lib/Headers/opencl-c.h | 655
-rw-r--r--  lib/Headers/pmmintrin.h | 53
-rw-r--r--  lib/Headers/popcntintrin.h | 8
-rw-r--r--  lib/Headers/stdatomic.h | 10
-rw-r--r--  lib/Headers/tmmintrin.h | 42
-rw-r--r--  lib/Headers/xmmintrin.h | 579
-rw-r--r--  lib/Index/CommentToXML.cpp | 45
-rw-r--r--  lib/Index/IndexBody.cpp | 45
-rw-r--r--  lib/Index/IndexDecl.cpp | 46
-rw-r--r--  lib/Index/IndexSymbol.cpp | 130
-rw-r--r--  lib/Index/IndexingContext.cpp | 20
-rw-r--r--  lib/Index/USRGeneration.cpp | 32
-rw-r--r--  lib/Lex/HeaderMap.cpp | 2
-rw-r--r--  lib/Lex/HeaderSearch.cpp | 128
-rw-r--r--  lib/Lex/Lexer.cpp | 82
-rw-r--r--  lib/Lex/LiteralSupport.cpp | 52
-rw-r--r--  lib/Lex/MacroInfo.cpp | 2
-rw-r--r--  lib/Lex/ModuleMap.cpp | 119
-rw-r--r--  lib/Lex/PPCaching.cpp | 4
-rw-r--r--  lib/Lex/PPDirectives.cpp | 87
-rw-r--r--  lib/Lex/PPExpressions.cpp | 17
-rw-r--r--  lib/Lex/PPLexerChange.cpp | 16
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp | 95
-rw-r--r--  lib/Lex/PTHLexer.cpp | 17
-rw-r--r--  lib/Lex/Pragma.cpp | 66
-rw-r--r--  lib/Lex/PreprocessingRecord.cpp | 15
-rw-r--r--  lib/Lex/Preprocessor.cpp | 73
-rw-r--r--  lib/Lex/TokenConcatenation.cpp | 2
-rw-r--r--  lib/Lex/TokenLexer.cpp | 11
-rw-r--r--  lib/Parse/ParseAST.cpp | 22
-rw-r--r--  lib/Parse/ParseCXXInlineMethods.cpp | 52
-rw-r--r--  lib/Parse/ParseDecl.cpp | 211
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 539
-rw-r--r--  lib/Parse/ParseExpr.cpp | 19
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 168
-rw-r--r--  lib/Parse/ParseInit.cpp | 3
-rw-r--r--  lib/Parse/ParseObjc.cpp | 47
-rw-r--r--  lib/Parse/ParseOpenMP.cpp | 58
-rw-r--r--  lib/Parse/ParsePragma.cpp | 201
-rw-r--r--  lib/Parse/ParseStmt.cpp | 10
-rw-r--r--  lib/Parse/ParseStmtAsm.cpp | 6
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 12
-rw-r--r--  lib/Parse/ParseTentative.cpp | 13
-rw-r--r--  lib/Parse/Parser.cpp | 219
-rw-r--r--  lib/Rewrite/HTMLRewrite.cpp | 19
-rw-r--r--  lib/Sema/AnalysisBasedWarnings.cpp | 60
-rw-r--r--  lib/Sema/AttributeList.cpp | 3
-rw-r--r--  lib/Sema/CodeCompleteConsumer.cpp | 31
-rw-r--r--  lib/Sema/DeclSpec.cpp | 95
-rw-r--r--  lib/Sema/DelayedDiagnostic.cpp | 33
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp | 23
-rw-r--r--  lib/Sema/MultiplexExternalSemaSource.cpp | 3
-rw-r--r--  lib/Sema/ScopeInfo.cpp | 2
-rw-r--r--  lib/Sema/Sema.cpp | 192
-rw-r--r--  lib/Sema/SemaAttr.cpp | 2
-rw-r--r--  lib/Sema/SemaCUDA.cpp | 520
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp | 133
-rw-r--r--  lib/Sema/SemaCast.cpp | 50
-rw-r--r--  lib/Sema/SemaChecking.cpp | 1250
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 397
-rw-r--r--  lib/Sema/SemaCoroutine.cpp | 373
-rw-r--r--  lib/Sema/SemaDecl.cpp | 1311
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 626
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 1223
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 49
-rw-r--r--  lib/Sema/SemaExceptionSpec.cpp | 240
-rw-r--r--  lib/Sema/SemaExpr.cpp | 885
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 1908
-rw-r--r--  lib/Sema/SemaExprMember.cpp | 111
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 22
-rw-r--r--  lib/Sema/SemaInit.cpp | 1192
-rw-r--r--  lib/Sema/SemaLambda.cpp | 125
-rw-r--r--  lib/Sema/SemaLookup.cpp | 38
-rw-r--r--  lib/Sema/SemaObjCProperty.cpp | 48
-rw-r--r--  lib/Sema/SemaOpenMP.cpp | 2179
-rw-r--r--  lib/Sema/SemaOverload.cpp | 548
-rw-r--r--  lib/Sema/SemaPseudoObject.cpp | 14
-rw-r--r--  lib/Sema/SemaStmt.cpp | 26
-rw-r--r--  lib/Sema/SemaStmtAsm.cpp | 81
-rw-r--r--  lib/Sema/SemaStmtAttr.cpp | 10
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 957
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 1669
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 144
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 457
-rw-r--r--  lib/Sema/SemaTemplateVariadic.cpp | 84
-rw-r--r--  lib/Sema/SemaType.cpp | 693
-rw-r--r--  lib/Sema/TreeTransform.h | 423
-rw-r--r--  lib/Sema/TypeLocBuilder.h | 2
-rw-r--r--  lib/Serialization/ASTCommon.cpp | 5
-rw-r--r--  lib/Serialization/ASTCommon.h | 1
-rw-r--r--  lib/Serialization/ASTReader.cpp | 1067
-rw-r--r--  lib/Serialization/ASTReaderDecl.cpp | 1427
-rw-r--r--  lib/Serialization/ASTReaderInternals.h | 17
-rw-r--r--  lib/Serialization/ASTReaderStmt.cpp | 2196
-rw-r--r--  lib/Serialization/ASTWriter.cpp | 297
-rw-r--r--  lib/Serialization/ASTWriterDecl.cpp | 75
-rw-r--r--  lib/Serialization/ASTWriterStmt.cpp | 109
-rw-r--r--  lib/Serialization/GeneratePCH.cpp | 27
-rw-r--r--  lib/Serialization/GlobalModuleIndex.cpp | 12
-rw-r--r--  lib/Serialization/Module.cpp | 1
-rw-r--r--  lib/Serialization/ModuleFileExtension.cpp | 1
-rw-r--r--  lib/Serialization/ModuleManager.cpp | 101
-rw-r--r--  lib/Serialization/MultiOnDiskHashTable.h | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp | 68
-rw-r--r--  lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp | 92
-rw-r--r--  lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp | 77
-rw-r--r--  lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp | 109
-rw-r--r--  lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Checkers/CMakeLists.txt | 11
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 8
-rw-r--r--  lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp | 62
-rw-r--r--  lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp | 21
-rw-r--r--  lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp | 7
-rw-r--r--  lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp | 87
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp | 33
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Checkers/ChrootChecker.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Checkers/CloneChecker.cpp | 161
-rw-r--r--  lib/StaticAnalyzer/Checkers/ConversionChecker.cpp | 192
-rw-r--r--  lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp | 7
-rw-r--r--  lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp | 40
-rw-r--r--  lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp | 105
-rw-r--r--  lib/StaticAnalyzer/Checkers/GTestChecker.cpp | 299
-rw-r--r--  lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp | 44
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp | 9
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h | 12
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h | 97
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h | 3
-rw-r--r--  lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp | 7
-rw-r--r--  lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp | 73
-rw-r--r--  lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 87
-rw-r--r--  lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp | 29
-rw-r--r--  lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp | 348
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp | 82
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/PaddingChecker.cpp | 58
-rw-r--r--  lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | 82
-rw-r--r--  lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp | 1055
-rw-r--r--  lib/StaticAnalyzer/Checkers/StreamChecker.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp | 105
-rw-r--r--  lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp | 14
-rw-r--r--  lib/StaticAnalyzer/Checkers/ValistChecker.cpp | 373
-rw-r--r--  lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp | 121
-rw-r--r--  lib/StaticAnalyzer/Core/AnalyzerOptions.cpp | 26
-rw-r--r--  lib/StaticAnalyzer/Core/BasicValueFactory.cpp | 50
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporter.cpp | 107
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 197
-rw-r--r--  lib/StaticAnalyzer/Core/CallEvent.cpp | 7
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerManager.cpp | 19
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerRegistry.cpp | 19
-rw-r--r--  lib/StaticAnalyzer/Core/CoreEngine.cpp | 23
-rw-r--r--  lib/StaticAnalyzer/Core/ExplodedGraph.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngine.cpp | 227
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineC.cpp | 222
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 30
-rw-r--r--  lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp | 88
-rw-r--r--  lib/StaticAnalyzer/Core/IssueHash.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Core/MemRegion.cpp | 42
-rw-r--r--  lib/StaticAnalyzer/Core/PathDiagnostic.cpp | 39
-rw-r--r--  lib/StaticAnalyzer/Core/PlistDiagnostics.cpp | 56
-rw-r--r--  lib/StaticAnalyzer/Core/ProgramState.cpp | 33
-rw-r--r--  lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 202
-rw-r--r--  lib/StaticAnalyzer/Core/RegionStore.cpp | 9
-rw-r--r--  lib/StaticAnalyzer/Core/SValBuilder.cpp | 12
-rw-r--r--  lib/StaticAnalyzer/Core/SVals.cpp | 53
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp | 153
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleConstraintManager.h | 80
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 41
-rw-r--r--  lib/StaticAnalyzer/Core/Store.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Core/SymbolManager.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 138
-rw-r--r--  lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp | 25
-rw-r--r--  lib/StaticAnalyzer/Frontend/ModelInjector.cpp | 1
-rw-r--r--  lib/StaticAnalyzer/Frontend/ModelInjector.h | 4
-rw-r--r--  lib/Tooling/ArgumentsAdjusters.cpp | 2
-rw-r--r--  lib/Tooling/CMakeLists.txt | 8
-rw-r--r--  lib/Tooling/CompilationDatabase.cpp | 8
-rw-r--r--  lib/Tooling/Core/Lookup.cpp | 94
-rw-r--r--  lib/Tooling/Core/QualTypeNames.cpp | 2
-rw-r--r--  lib/Tooling/Core/Replacement.cpp | 501
-rw-r--r--  lib/Tooling/JSONCompilationDatabase.cpp | 60
-rw-r--r--  lib/Tooling/Refactoring.cpp | 23
-rw-r--r--  lib/Tooling/RefactoringCallbacks.cpp | 50
-rw-r--r--  lib/Tooling/Tooling.cpp | 10
447 files changed, 56519 insertions(+), 24864 deletions(-)
diff --git a/lib/ARCMigrate/ARCMT.cpp b/lib/ARCMigrate/ARCMT.cpp
index da93d8418e78..680aa3e48da4 100644
--- a/lib/ARCMigrate/ARCMT.cpp
+++ b/lib/ARCMigrate/ARCMT.cpp
@@ -16,6 +16,7 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Serialization/ASTReader.h"
diff --git a/lib/ARCMigrate/FileRemapper.cpp b/lib/ARCMigrate/FileRemapper.cpp
index 2cf20699aeef..4dedac88f982 100644
--- a/lib/ARCMigrate/FileRemapper.cpp
+++ b/lib/ARCMigrate/FileRemapper.cpp
@@ -64,7 +64,7 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileBuf =
- llvm::MemoryBuffer::getFile(infoFile.c_str());
+ llvm::MemoryBuffer::getFile(infoFile);
if (!fileBuf)
return report("Error opening file: " + infoFile, Diag);
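The FileRemapper change above drops a redundant c_str(): llvm::MemoryBuffer::getFile() takes its file name as an llvm::Twine, which binds to a std::string directly. A minimal sketch of the call pattern (the function name loadInfoFile is illustrative, not part of the patch):

    #include "llvm/Support/ErrorOr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <memory>
    #include <string>

    bool loadInfoFile(const std::string &infoFile) {
      // Twine accepts a std::string directly, so no round-trip through a
      // C string is needed.
      llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileBuf =
          llvm::MemoryBuffer::getFile(infoFile);
      return static_cast<bool>(fileBuf); // false if the open failed
    }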
diff --git a/lib/ARCMigrate/Transforms.cpp b/lib/ARCMigrate/Transforms.cpp
index 3fd36ff310f3..cb96a547fbac 100644
--- a/lib/ARCMigrate/Transforms.cpp
+++ b/lib/ARCMigrate/Transforms.cpp
@@ -11,17 +11,12 @@
#include "Internals.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
-#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Sema.h"
-#include "clang/Sema/SemaDiagnostic.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/StringSwitch.h"
-#include <map>
using namespace clang;
using namespace arcmt;
diff --git a/lib/AST/APValue.cpp b/lib/AST/APValue.cpp
index 3c587331ed07..488ad3373ca3 100644
--- a/lib/AST/APValue.cpp
+++ b/lib/AST/APValue.cpp
@@ -17,8 +17,6 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
-#include "clang/Basic/Diagnostic.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -29,6 +27,7 @@ namespace {
CharUnits Offset;
unsigned PathLength;
unsigned CallIndex;
+ bool IsNullPtr;
};
}
@@ -151,10 +150,11 @@ APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
MakeLValue();
if (RHS.hasLValuePath())
setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(),
- RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex());
+ RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex(),
+ RHS.isNullPointer());
else
setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(),
- RHS.getLValueCallIndex());
+ RHS.getLValueCallIndex(), RHS.isNullPointer());
break;
case Array:
MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize());
@@ -263,7 +263,7 @@ LLVM_DUMP_METHOD void APValue::dump() const {
static double GetApproxValue(const llvm::APFloat &F) {
llvm::APFloat V = F;
bool ignored;
- V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ V.convert(llvm::APFloat::IEEEdouble(), llvm::APFloat::rmNearestTiesToEven,
&ignored);
return V.convertToDouble();
}
@@ -581,8 +581,13 @@ unsigned APValue::getLValueCallIndex() const {
return ((const LV*)(const char*)Data.buffer)->CallIndex;
}
+bool APValue::isNullPointer() const {
+ assert(isLValue() && "Invalid usage");
+ return ((const LV*)(const char*)Data.buffer)->IsNullPtr;
+}
+
void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
- unsigned CallIndex) {
+ unsigned CallIndex, bool IsNullPtr) {
assert(isLValue() && "Invalid accessor");
LV &LVal = *((LV*)(char*)Data.buffer);
LVal.BaseAndIsOnePastTheEnd.setPointer(B);
@@ -590,11 +595,12 @@ void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
LVal.Offset = O;
LVal.CallIndex = CallIndex;
LVal.resizePath((unsigned)-1);
+ LVal.IsNullPtr = IsNullPtr;
}
void APValue::setLValue(LValueBase B, const CharUnits &O,
ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
- unsigned CallIndex) {
+ unsigned CallIndex, bool IsNullPtr) {
assert(isLValue() && "Invalid accessor");
LV &LVal = *((LV*)(char*)Data.buffer);
LVal.BaseAndIsOnePastTheEnd.setPointer(B);
@@ -603,6 +609,7 @@ void APValue::setLValue(LValueBase B, const CharUnits &O,
LVal.CallIndex = CallIndex;
LVal.resizePath(Path.size());
memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
+ LVal.IsNullPtr = IsNullPtr;
}
const ValueDecl *APValue::getMemberPointerDecl() const {
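The APValue hunks above thread an IsNullPtr bit through the LValue representation so that constant evaluation can tell a genuine null pointer apart from a pointer value that merely has a zero offset. A minimal sketch of the pattern, using a hypothetical PointerValue type rather than Clang's real LV layout:

    #include <cstdint>

    // Hypothetical mirror of the idea: the null-pointer property lives next
    // to the offset/call-index data, and every setter writes it, so copies
    // (like APValue's copy constructor above) preserve it.
    struct PointerValue {
      uint64_t Offset = 0;
      unsigned CallIndex = 0;
      bool IsNullPtr = false;

      void set(uint64_t O, unsigned CI, bool IsNull) {
        Offset = O;
        CallIndex = CI;
        IsNullPtr = IsNull; // analogous to LVal.IsNullPtr = IsNullPtr;
      }
      bool isNullPointer() const { return IsNullPtr; }
    };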
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 6aad4d1d570b..1b5988d01988 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -34,7 +34,6 @@
#include "clang/Basic/Builtins.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Support/Capacity.h"
@@ -652,6 +651,10 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
cast<TemplateTemplateParmDecl>(*P)));
}
+ assert(!TTP->getRequiresClause() &&
+ "Unexpected requires-clause on template template-parameter");
+ Expr *const CanonRequiresClause = nullptr;
+
TemplateTemplateParmDecl *CanonTTP
= TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
SourceLocation(), TTP->getDepth(),
@@ -661,7 +664,8 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TemplateParameterList::Create(*this, SourceLocation(),
SourceLocation(),
CanonParams,
- SourceLocation()));
+ SourceLocation(),
+ CanonRequiresClause));
// Get the new insert position for the node we care about.
Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
@@ -700,8 +704,8 @@ static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
// language-specific address space.
static const unsigned FakeAddrSpaceMap[] = {
1, // opencl_global
- 2, // opencl_local
- 3, // opencl_constant
+ 3, // opencl_local
+ 2, // opencl_constant
4, // opencl_generic
5, // cuda_device
6, // cuda_constant
@@ -788,7 +792,8 @@ ASTContext::~ASTContext() {
MaterializedTemporaryValues)
MTVPair.second->~APValue();
- llvm::DeleteContainerSeconds(MangleNumberingContexts);
+ for (const auto &Value : ModuleInitializers)
+ Value.second->~PerModuleInitializers();
}
void ASTContext::ReleaseParentMapEntries() {
@@ -902,6 +907,67 @@ void ASTContext::deduplicateMergedDefinitonsFor(NamedDecl *ND) {
Merged.erase(std::remove(Merged.begin(), Merged.end(), nullptr), Merged.end());
}
+void ASTContext::PerModuleInitializers::resolve(ASTContext &Ctx) {
+ if (LazyInitializers.empty())
+ return;
+
+ auto *Source = Ctx.getExternalSource();
+ assert(Source && "lazy initializers but no external source");
+
+ auto LazyInits = std::move(LazyInitializers);
+ LazyInitializers.clear();
+
+ for (auto ID : LazyInits)
+ Initializers.push_back(Source->GetExternalDecl(ID));
+
+ assert(LazyInitializers.empty() &&
+ "GetExternalDecl for lazy module initializer added more inits");
+}
+
+void ASTContext::addModuleInitializer(Module *M, Decl *D) {
+ // One special case: if we add a module initializer that imports another
+ // module, and that module's only initializer is an ImportDecl, simplify.
+ if (auto *ID = dyn_cast<ImportDecl>(D)) {
+ auto It = ModuleInitializers.find(ID->getImportedModule());
+
+ // Maybe the ImportDecl does nothing at all. (Common case.)
+ if (It == ModuleInitializers.end())
+ return;
+
+ // Maybe the ImportDecl only imports another ImportDecl.
+ auto &Imported = *It->second;
+ if (Imported.Initializers.size() + Imported.LazyInitializers.size() == 1) {
+ Imported.resolve(*this);
+ auto *OnlyDecl = Imported.Initializers.front();
+ if (isa<ImportDecl>(OnlyDecl))
+ D = OnlyDecl;
+ }
+ }
+
+ auto *&Inits = ModuleInitializers[M];
+ if (!Inits)
+ Inits = new (*this) PerModuleInitializers;
+ Inits->Initializers.push_back(D);
+}
+
+void ASTContext::addLazyModuleInitializers(Module *M, ArrayRef<uint32_t> IDs) {
+ auto *&Inits = ModuleInitializers[M];
+ if (!Inits)
+ Inits = new (*this) PerModuleInitializers;
+ Inits->LazyInitializers.insert(Inits->LazyInitializers.end(),
+ IDs.begin(), IDs.end());
+}
+
+ArrayRef<Decl*> ASTContext::getModuleInitializers(Module *M) {
+ auto It = ModuleInitializers.find(M);
+ if (It == ModuleInitializers.end())
+ return None;
+
+ auto *Inits = It->second;
+ Inits->resolve(*this);
+ return Inits->Initializers;
+}
+
ExternCContextDecl *ASTContext::getExternCContextDecl() const {
if (!ExternCContext)
ExternCContext = ExternCContextDecl::Create(*this, getTranslationUnitDecl());
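PerModuleInitializers keeps two lists: initializers that are already Decl pointers, and lazy declaration IDs that are deserialized from the external AST source only when getModuleInitializers() is first called. Note how resolve() moves the lazy list aside before materializing, so a GetExternalDecl() call that re-enters addLazyModuleInitializers() cannot invalidate the loop. A sketch of that lazy-resolution pattern under hypothetical names (ExternalSource and PerModuleInits are illustrative, not Clang's API):

    #include <cstdint>
    #include <utility>
    #include <vector>

    struct Decl {};
    struct ExternalSource {
      Decl *GetExternalDecl(uint32_t /*ID*/) { return nullptr; } // stand-in
    };

    struct PerModuleInits {
      std::vector<Decl *> Initializers; // already materialized
      std::vector<uint32_t> LazyIDs;    // not yet deserialized

      void resolve(ExternalSource &Src) {
        if (LazyIDs.empty())
          return;
        // Move the list aside first: materializing may append new lazy IDs.
        auto IDs = std::move(LazyIDs);
        LazyIDs.clear();
        for (uint32_t ID : IDs)
          Initializers.push_back(Src.GetExternalDecl(ID));
      }
    };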
@@ -1204,9 +1270,8 @@ void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
}
NamedDecl *
-ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
- llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos
- = InstantiatedFromUsingDecl.find(UUD);
+ASTContext::getInstantiatedFromUsingDecl(NamedDecl *UUD) {
+ auto Pos = InstantiatedFromUsingDecl.find(UUD);
if (Pos == InstantiatedFromUsingDecl.end())
return nullptr;
@@ -1214,11 +1279,15 @@ ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
}
void
-ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
+ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
assert((isa<UsingDecl>(Pattern) ||
isa<UnresolvedUsingValueDecl>(Pattern) ||
isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
"pattern decl is not a using decl");
+ assert((isa<UsingDecl>(Inst) ||
+ isa<UnresolvedUsingValueDecl>(Inst) ||
+ isa<UnresolvedUsingTypenameDecl>(Inst)) &&
+ "instantiation did not produce a using decl");
assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
InstantiatedFromUsingDecl[Inst] = Pattern;
}
@@ -1504,6 +1573,30 @@ bool ASTContext::isAlignmentRequired(QualType T) const {
return isAlignmentRequired(T.getTypePtr());
}
+unsigned ASTContext::getTypeAlignIfKnown(QualType T) const {
+ // An alignment on a typedef overrides anything else.
+ if (auto *TT = T->getAs<TypedefType>())
+ if (unsigned Align = TT->getDecl()->getMaxAlignment())
+ return Align;
+
+ // If we have an (array of) complete type, we're done.
+ T = getBaseElementType(T);
+ if (!T->isIncompleteType())
+ return getTypeAlign(T);
+
+ // If we had an array type, its element type might be a typedef
+ // type with an alignment attribute.
+ if (auto *TT = T->getAs<TypedefType>())
+ if (unsigned Align = TT->getDecl()->getMaxAlignment())
+ return Align;
+
+ // Otherwise, see if the declaration of the type had an attribute.
+ if (auto *TT = T->getAs<TagType>())
+ return TT->getDecl()->getMaxAlignment();
+
+ return 0;
+}
+
TypeInfo ASTContext::getTypeInfo(const Type *T) const {
TypeInfoMap::iterator I = MemoizedTypeInfo.find(T);
if (I != MemoizedTypeInfo.end())
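getTypeAlignIfKnown checks for an alignment attribute on a typedef before anything else, then the (array-stripped) complete type, then a tag declaration's attribute. The precedence it implements is observable with the GNU aligned attribute in plain C++ (the values assume a typical ABI where char has alignment 1):

    struct Payload { char c; };                            // alignment 1
    typedef Payload AlignedPayload __attribute__((aligned(16)));

    // An alignment on a typedef overrides anything else:
    static_assert(alignof(AlignedPayload) == 16, "typedef attribute wins");
    static_assert(alignof(Payload) == 1, "underlying type is unchanged");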
@@ -1671,24 +1764,29 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Width = Target->getPointerWidth(0);
Align = Target->getPointerAlign(0);
break;
- case BuiltinType::OCLSampler:
- // Samplers are modeled as integers.
- Width = Target->getIntWidth();
- Align = Target->getIntAlign();
+ case BuiltinType::OCLSampler: {
+ auto AS = getTargetAddressSpace(LangAS::opencl_constant);
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
break;
+ }
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- case BuiltinType::Id:
-#include "clang/Basic/OpenCLImageTypes.def"
-
// Currently these types are pointers to opaque types.
Width = Target->getPointerWidth(0);
Align = Target->getPointerAlign(0);
break;
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id:
+#include "clang/Basic/OpenCLImageTypes.def"
+ {
+ auto AS = getTargetAddressSpace(Target->getOpenCLImageAddrSpace());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ }
}
break;
case Type::ObjCObjectPointer:
@@ -1787,6 +1885,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Paren:
return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
+ case Type::ObjCTypeParam:
+ return getTypeInfo(cast<ObjCTypeParamType>(T)->desugar().getTypePtr());
+
case Type::Typedef: {
const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
TypeInfo Info = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
@@ -2284,6 +2385,14 @@ static QualType getFunctionTypeWithExceptionSpec(
Proto->getExtProtoInfo().withExceptionSpec(ESI));
}
+bool ASTContext::hasSameFunctionTypeIgnoringExceptionSpec(QualType T,
+ QualType U) {
+ return hasSameType(T, U) ||
+ (getLangOpts().CPlusPlus1z &&
+ hasSameType(getFunctionTypeWithExceptionSpec(*this, T, EST_None),
+ getFunctionTypeWithExceptionSpec(*this, U, EST_None)));
+}
+
void ASTContext::adjustExceptionSpec(
FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI,
bool AsWritten) {
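hasSameFunctionTypeIgnoringExceptionSpec exists because C++1z (C++17) folds the exception specification into the function type itself, so two declarations may now differ only in noexcept and still need to be treated as equivalent. The language-level effect is checkable with standard C++17 (compile with -std=c++17):

    #include <type_traits>

    void f();          // potentially-throwing type: void()
    void g() noexcept; // a distinct type in C++17: void() noexcept

    static_assert(!std::is_same_v<decltype(f), decltype(g)>, "");
    // Conversions may only strip noexcept, never add it:
    static_assert(std::is_convertible_v<void (*)() noexcept, void (*)()>, "");
    static_assert(!std::is_convertible_v<void (*)(), void (*)() noexcept>, "");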
@@ -3039,46 +3148,160 @@ ASTContext::getCanonicalFunctionResultType(QualType ResultType) const {
return CanResultType;
}
-QualType
-ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
- const FunctionProtoType::ExtProtoInfo &EPI) const {
+static bool isCanonicalExceptionSpecification(
+ const FunctionProtoType::ExceptionSpecInfo &ESI, bool NoexceptInType) {
+ if (ESI.Type == EST_None)
+ return true;
+ if (!NoexceptInType)
+ return false;
+
+ // C++17 onwards: exception specification is part of the type, as a simple
+ // boolean "can this function type throw".
+ if (ESI.Type == EST_BasicNoexcept)
+ return true;
+
+ // A dynamic exception specification is canonical if it only contains pack
+ // expansions (so we can't tell whether it's non-throwing) and all its
+ // contained types are canonical.
+ if (ESI.Type == EST_Dynamic) {
+ bool AnyPackExpansions = false;
+ for (QualType ET : ESI.Exceptions) {
+ if (!ET.isCanonical())
+ return false;
+ if (ET->getAs<PackExpansionType>())
+ AnyPackExpansions = true;
+ }
+ return AnyPackExpansions;
+ }
+
+ // A noexcept(expr) specification is (possibly) canonical if expr is
+ // value-dependent.
+ if (ESI.Type == EST_ComputedNoexcept)
+ return ESI.NoexceptExpr && ESI.NoexceptExpr->isValueDependent();
+
+ return false;
+}
+
+QualType ASTContext::getFunctionTypeInternal(
+ QualType ResultTy, ArrayRef<QualType> ArgArray,
+ const FunctionProtoType::ExtProtoInfo &EPI, bool OnlyWantCanonical) const {
size_t NumArgs = ArgArray.size();
// Unique functions, to guarantee there is only one function of a particular
// structure.
llvm::FoldingSetNodeID ID;
FunctionProtoType::Profile(ID, ResultTy, ArgArray.begin(), NumArgs, EPI,
- *this);
+ *this, true);
+
+ QualType Canonical;
+ bool Unique = false;
void *InsertPos = nullptr;
- if (FunctionProtoType *FTP =
- FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
- return QualType(FTP, 0);
+ if (FunctionProtoType *FPT =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos)) {
+ QualType Existing = QualType(FPT, 0);
+
+ // If we find a pre-existing equivalent FunctionProtoType, we can just reuse
+ // it so long as our exception specification doesn't contain a dependent
+ // noexcept expression, or we're just looking for a canonical type.
+ // Otherwise, we're going to need to create a type
+ // sugar node to hold the concrete expression.
+ if (OnlyWantCanonical || EPI.ExceptionSpec.Type != EST_ComputedNoexcept ||
+ EPI.ExceptionSpec.NoexceptExpr == FPT->getNoexceptExpr())
+ return Existing;
+
+ // We need a new type sugar node for this one, to hold the new noexcept
+ // expression. We do no canonicalization here, but that's OK since we don't
+ // expect to see the same noexcept expression much more than once.
+ Canonical = getCanonicalType(Existing);
+ Unique = true;
+ }
+
+ bool NoexceptInType = getLangOpts().CPlusPlus1z;
+ bool IsCanonicalExceptionSpec =
+ isCanonicalExceptionSpecification(EPI.ExceptionSpec, NoexceptInType);
// Determine whether the type being created is already canonical or not.
- bool isCanonical =
- EPI.ExceptionSpec.Type == EST_None && isCanonicalResultType(ResultTy) &&
- !EPI.HasTrailingReturn;
+ bool isCanonical = !Unique && IsCanonicalExceptionSpec &&
+ isCanonicalResultType(ResultTy) && !EPI.HasTrailingReturn;
for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
if (!ArgArray[i].isCanonicalAsParam())
isCanonical = false;
- // If this type isn't canonical, get the canonical version of it.
- // The exception spec is not part of the canonical type.
- QualType Canonical;
- if (!isCanonical) {
+ if (OnlyWantCanonical)
+ assert(isCanonical &&
+ "given non-canonical parameters constructing canonical type");
+
+ // If this type isn't canonical, get the canonical version of it if we don't
+ // already have it. The exception spec is only partially part of the
+ // canonical type, and only in C++17 onwards.
+ if (!isCanonical && Canonical.isNull()) {
SmallVector<QualType, 16> CanonicalArgs;
CanonicalArgs.reserve(NumArgs);
for (unsigned i = 0; i != NumArgs; ++i)
CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
+ llvm::SmallVector<QualType, 8> ExceptionTypeStorage;
FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
CanonicalEPI.HasTrailingReturn = false;
- CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
+
+ if (IsCanonicalExceptionSpec) {
+ // Exception spec is already OK.
+ } else if (NoexceptInType) {
+ switch (EPI.ExceptionSpec.Type) {
+ case EST_Unparsed: case EST_Unevaluated: case EST_Uninstantiated:
+ // We don't know yet. It shouldn't matter what we pick here; no-one
+ // should ever look at this.
+ LLVM_FALLTHROUGH;
+ case EST_None: case EST_MSAny:
+ CanonicalEPI.ExceptionSpec.Type = EST_None;
+ break;
+
+ // A dynamic exception specification is almost always "not noexcept",
+ // with the exception that a pack expansion might expand to no types.
+ case EST_Dynamic: {
+ bool AnyPacks = false;
+ for (QualType ET : EPI.ExceptionSpec.Exceptions) {
+ if (ET->getAs<PackExpansionType>())
+ AnyPacks = true;
+ ExceptionTypeStorage.push_back(getCanonicalType(ET));
+ }
+ if (!AnyPacks)
+ CanonicalEPI.ExceptionSpec.Type = EST_None;
+ else {
+ CanonicalEPI.ExceptionSpec.Type = EST_Dynamic;
+ CanonicalEPI.ExceptionSpec.Exceptions = ExceptionTypeStorage;
+ }
+ break;
+ }
+
+ case EST_DynamicNone: case EST_BasicNoexcept:
+ CanonicalEPI.ExceptionSpec.Type = EST_BasicNoexcept;
+ break;
+
+ case EST_ComputedNoexcept:
+ llvm::APSInt Value(1);
+ auto *E = CanonicalEPI.ExceptionSpec.NoexceptExpr;
+ if (!E || !E->isIntegerConstantExpr(Value, *this, nullptr,
+ /*IsEvaluated*/false)) {
+ // This noexcept specification is invalid.
+ // FIXME: Should this be able to happen?
+ CanonicalEPI.ExceptionSpec.Type = EST_None;
+ break;
+ }
+
+ CanonicalEPI.ExceptionSpec.Type =
+ Value.getBoolValue() ? EST_BasicNoexcept : EST_None;
+ break;
+ }
+ } else {
+ CanonicalEPI.ExceptionSpec = FunctionProtoType::ExceptionSpecInfo();
+ }
// Adjust the canonical function result type.
CanQualType CanResultTy = getCanonicalFunctionResultType(ResultTy);
- Canonical = getFunctionType(CanResultTy, CanonicalArgs, CanonicalEPI);
+ Canonical =
+ getFunctionTypeInternal(CanResultTy, CanonicalArgs, CanonicalEPI, true);
// Get the new insert position for the node we care about.
FunctionProtoType *NewIP =
@@ -3121,14 +3344,14 @@ ASTContext::getFunctionType(QualType ResultTy, ArrayRef<QualType> ArgArray,
FunctionProtoType::ExtProtoInfo newEPI = EPI;
new (FTP) FunctionProtoType(ResultTy, ArgArray, Canonical, newEPI);
Types.push_back(FTP);
- FunctionProtoTypes.InsertNode(FTP, InsertPos);
+ if (!Unique)
+ FunctionProtoTypes.InsertNode(FTP, InsertPos);
return QualType(FTP, 0);
}
-/// Return pipe type for the specified type.
-QualType ASTContext::getPipeType(QualType T) const {
+QualType ASTContext::getPipeType(QualType T, bool ReadOnly) const {
llvm::FoldingSetNodeID ID;
- PipeType::Profile(ID, T);
+ PipeType::Profile(ID, T, ReadOnly);
void *InsertPos = 0;
if (PipeType *PT = PipeTypes.FindNodeOrInsertPos(ID, InsertPos))
@@ -3138,19 +3361,27 @@ QualType ASTContext::getPipeType(QualType T) const {
// either, so fill in the canonical type field.
QualType Canonical;
if (!T.isCanonical()) {
- Canonical = getPipeType(getCanonicalType(T));
+ Canonical = getPipeType(getCanonicalType(T), ReadOnly);
// Get the new insert position for the node we care about.
PipeType *NewIP = PipeTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!NewIP && "Shouldn't be in the map!");
(void)NewIP;
}
- PipeType *New = new (*this, TypeAlignment) PipeType(T, Canonical);
+ PipeType *New = new (*this, TypeAlignment) PipeType(T, Canonical, ReadOnly);
Types.push_back(New);
PipeTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
}
+QualType ASTContext::getReadPipeType(QualType T) const {
+ return getPipeType(T, true);
+}
+
+QualType ASTContext::getWritePipeType(QualType T) const {
+ return getPipeType(T, false);
+}
+
#ifndef NDEBUG
static bool NeedsInjectedClassNameType(const RecordDecl *D) {
if (!isa<CXXRecordDecl>(D)) return false;
@@ -3641,6 +3872,44 @@ ASTContext::getDependentTemplateSpecializationType(
return QualType(T, 0);
}
+void
+ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
+ SmallVectorImpl<TemplateArgument> &Args) {
+ Args.reserve(Args.size() + Params->size());
+
+ for (NamedDecl *Param : *Params) {
+ TemplateArgument Arg;
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ QualType ArgType = getTypeDeclType(TTP);
+ if (TTP->isParameterPack())
+ ArgType = getPackExpansionType(ArgType, None);
+
+ Arg = TemplateArgument(ArgType);
+ } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Expr *E = new (*this) DeclRefExpr(
+ NTTP, /*enclosing*/false,
+ NTTP->getType().getNonLValueExprType(*this),
+ Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
+
+ if (NTTP->isParameterPack())
+ E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
+ None);
+ Arg = TemplateArgument(E);
+ } else {
+ auto *TTP = cast<TemplateTemplateParmDecl>(Param);
+ if (TTP->isParameterPack())
+ Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
+ else
+ Arg = TemplateArgument(TemplateName(TTP));
+ }
+
+ if (Param->isTemplateParameterPack())
+ Arg = TemplateArgument::CreatePackCopy(*this, Arg);
+
+ Args.push_back(Arg);
+ }
+}
+
QualType ASTContext::getPackExpansionType(QualType Pattern,
Optional<unsigned> NumExpansions) {
llvm::FoldingSetNodeID ID;
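getInjectedTemplateArgs synthesizes, for each template parameter, the argument that names that parameter itself: the same argument list the injected-class-name denotes inside a class template. In standard C++ terms:

    #include <type_traits>

    template <typename T, int N>
    struct Buffer {
      // Inside the template, the injected-class-name "Buffer" is shorthand
      // for Buffer<T, N>: the template applied to its own parameters.
      static_assert(std::is_same<Buffer, Buffer<T, N>>::value, "");
    };

    template struct Buffer<int, 4>; // instantiate so the check is evaluated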
@@ -3798,6 +4067,116 @@ QualType ASTContext::getObjCObjectType(
return QualType(T, 0);
}
+/// Apply Objective-C protocol qualifiers to the given type.
+/// If this is for the canonical type of a type parameter, we can apply
+/// protocol qualifiers on the ObjCObjectPointerType.
+QualType
+ASTContext::applyObjCProtocolQualifiers(QualType type,
+ ArrayRef<ObjCProtocolDecl *> protocols, bool &hasError,
+ bool allowOnPointerType) const {
+ hasError = false;
+
+ if (const ObjCTypeParamType *objT =
+ dyn_cast<ObjCTypeParamType>(type.getTypePtr())) {
+ return getObjCTypeParamType(objT->getDecl(), protocols);
+ }
+
+ // Apply protocol qualifiers to ObjCObjectPointerType.
+ if (allowOnPointerType) {
+ if (const ObjCObjectPointerType *objPtr =
+ dyn_cast<ObjCObjectPointerType>(type.getTypePtr())) {
+ const ObjCObjectType *objT = objPtr->getObjectType();
+ // Merge protocol lists and construct ObjCObjectType.
+ SmallVector<ObjCProtocolDecl*, 8> protocolsVec;
+ protocolsVec.append(objT->qual_begin(),
+ objT->qual_end());
+ protocolsVec.append(protocols.begin(), protocols.end());
+ ArrayRef<ObjCProtocolDecl *> protocols = protocolsVec;
+ type = getObjCObjectType(
+ objT->getBaseType(),
+ objT->getTypeArgsAsWritten(),
+ protocols,
+ objT->isKindOfTypeAsWritten());
+ return getObjCObjectPointerType(type);
+ }
+ }
+
+ // Apply protocol qualifiers to ObjCObjectType.
+ if (const ObjCObjectType *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
+ // FIXME: Check for protocols to which the class type is already
+ // known to conform.
+
+ return getObjCObjectType(objT->getBaseType(),
+ objT->getTypeArgsAsWritten(),
+ protocols,
+ objT->isKindOfTypeAsWritten());
+ }
+
+ // If the canonical type is ObjCObjectType, ...
+ if (type->isObjCObjectType()) {
+ // Silently overwrite any existing protocol qualifiers.
+ // TODO: determine whether that's the right thing to do.
+
+ // FIXME: Check for protocols to which the class type is already
+ // known to conform.
+ return getObjCObjectType(type, { }, protocols, false);
+ }
+
+ // id<protocol-list>
+ if (type->isObjCIdType()) {
+ const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
+ type = getObjCObjectType(ObjCBuiltinIdTy, { }, protocols,
+ objPtr->isKindOfType());
+ return getObjCObjectPointerType(type);
+ }
+
+ // Class<protocol-list>
+ if (type->isObjCClassType()) {
+ const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
+ type = getObjCObjectType(ObjCBuiltinClassTy, { }, protocols,
+ objPtr->isKindOfType());
+ return getObjCObjectPointerType(type);
+ }
+
+ hasError = true;
+ return type;
+}
+
+QualType
+ASTContext::getObjCTypeParamType(const ObjCTypeParamDecl *Decl,
+ ArrayRef<ObjCProtocolDecl *> protocols,
+ QualType Canonical) const {
+ // Look in the folding set for an existing type.
+ llvm::FoldingSetNodeID ID;
+ ObjCTypeParamType::Profile(ID, Decl, protocols);
+ void *InsertPos = nullptr;
+ if (ObjCTypeParamType *TypeParam =
+ ObjCTypeParamTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(TypeParam, 0);
+
+ if (Canonical.isNull()) {
+ // We canonicalize to the underlying type.
+ Canonical = getCanonicalType(Decl->getUnderlyingType());
+ if (!protocols.empty()) {
+      // Apply the protocol qualifiers.
+ bool hasError;
+ Canonical = applyObjCProtocolQualifiers(Canonical, protocols, hasError,
+ true/*allowOnPointerType*/);
+      assert(!hasError && "Error when applying protocol qualifier to bound type");
+ }
+ }
+
+ unsigned size = sizeof(ObjCTypeParamType);
+ size += protocols.size() * sizeof(ObjCProtocolDecl *);
+ void *mem = Allocate(size, TypeAlignment);
+ ObjCTypeParamType *newType = new (mem)
+ ObjCTypeParamType(Decl, Canonical, protocols);
+
+ Types.push_back(newType);
+ ObjCTypeParamTypes.InsertNode(newType, InsertPos);
+ return QualType(newType, 0);
+}
+
/// ObjCObjectAdoptsQTypeProtocols - Checks that protocols in IC's
/// protocol list adopt all protocols in QT's qualified-id protocol
/// list.
@@ -3983,7 +4362,7 @@ QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
DependentDecltypeType *Canon
= DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
if (!Canon) {
- // Build a new, canonical typeof(expr) type.
+ // Build a new, canonical decltype(expr) type.
Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
DependentDecltypeTypes.InsertNode(Canon, InsertPos);
}
@@ -4590,7 +4969,15 @@ QualType ASTContext::getArrayDecayedType(QualType Ty) const {
QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
// int x[restrict 4] -> int *restrict
- return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers());
+ QualType Result = getQualifiedType(PtrTy,
+ PrettyArrayType->getIndexTypeQualifiers());
+
+ // int x[_Nullable] -> int * _Nullable
+ if (auto Nullability = Ty->getNullability(*this)) {
+ Result = const_cast<ASTContext *>(this)->getAttributedType(
+ AttributedType::getNullabilityAttrKind(*Nullability), Result, Result);
+ }
+ return Result;
}
QualType ASTContext::getBaseElementType(const ArrayType *array) const {
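Per the comment in the hunk above, a nullability qualifier written inside a parameter's array brackets now survives array-to-pointer decay as an AttributedType on the pointer. With Clang's nullability extension that means (declarations are illustrative):

    // Written form: nullability inside the brackets of an array parameter.
    void fill(int buf[_Nullable], unsigned n);
    // After decay, the parameter behaves as if declared:
    //   void fill(int *_Nullable buf, unsigned n);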
@@ -5223,8 +5610,9 @@ std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
return S;
}
-bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
- std::string& S) {
+std::string
+ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl) const {
+ std::string S;
// Encode result type.
getObjCEncodingForType(Decl->getReturnType(), S);
CharUnits ParmOffset;
@@ -5235,8 +5623,8 @@ bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
if (sz.isZero())
continue;
- assert (sz.isPositive() &&
- "getObjCEncodingForFunctionDecl - Incomplete param type");
+ assert(sz.isPositive() &&
+ "getObjCEncodingForFunctionDecl - Incomplete param type");
ParmOffset += sz;
}
S += charUnitsToString(ParmOffset);
@@ -5258,7 +5646,7 @@ bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
ParmOffset += getObjCEncodingTypeSize(PType);
}
- return false;
+ return S;
}
/// getObjCEncodingForMethodParameter - Return the encoded type for a single
@@ -5280,11 +5668,11 @@ void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
/// getObjCEncodingForMethodDecl - Return the encoded type for this method
/// declaration.
-bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
- std::string& S,
- bool Extended) const {
+std::string ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
+ bool Extended) const {
// FIXME: This is not very efficient.
// Encode return type.
+ std::string S;
getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
Decl->getReturnType(), S, Extended);
// Compute size of all parameters.
@@ -5330,7 +5718,7 @@ bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
ParmOffset += getObjCEncodingTypeSize(PType);
}
- return false;
+ return S;
}
ObjCPropertyImplDecl *
@@ -5378,9 +5766,9 @@ ASTContext::getObjCPropertyImplDeclForPropertyDecl(
/// kPropertyNonAtomic = 'N' // property non-atomic
/// };
/// @endcode
-void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
- const Decl *Container,
- std::string& S) const {
+std::string
+ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
+ const Decl *Container) const {
// Collect information from the property implementation decl(s).
bool Dynamic = false;
ObjCPropertyImplDecl *SynthesizePID = nullptr;
@@ -5394,7 +5782,7 @@ void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
}
// FIXME: This is not very efficient.
- S = "T";
+ std::string S = "T";
// Encode result type.
// GCC has some special rules regarding encoding of properties which
@@ -5443,6 +5831,7 @@ void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
}
// FIXME: OBJCGC: weak & strong
+ return S;
}
/// getLegacyIntegralTypeEncoding -
@@ -5833,18 +6222,20 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
ObjCInterfaceDecl *OI = T->castAs<ObjCObjectType>()->getInterface();
S += '{';
S += OI->getObjCRuntimeNameAsString();
- S += '=';
- SmallVector<const ObjCIvarDecl*, 32> Ivars;
- DeepCollectObjCIvars(OI, true, Ivars);
- for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
- const FieldDecl *Field = cast<FieldDecl>(Ivars[i]);
- if (Field->isBitField())
- getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
- else
- getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD,
- false, false, false, false, false,
- EncodePointerToObjCTypedef,
- NotEncodedT);
+ if (ExpandStructures) {
+ S += '=';
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ DeepCollectObjCIvars(OI, true, Ivars);
+ for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
+ const FieldDecl *Field = cast<FieldDecl>(Ivars[i]);
+ if (Field->isBitField())
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
+ else
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD,
+ false, false, false, false, false,
+ EncodePointerToObjCTypedef,
+ NotEncodedT);
+ }
}
S += '}';
return;
@@ -6369,9 +6760,8 @@ CreateX86_64ABIBuiltinVaListDecl(const ASTContext *Context) {
static TypedefDecl *CreatePNaClABIBuiltinVaListDecl(const ASTContext *Context) {
// typedef int __builtin_va_list[4];
llvm::APInt Size(Context->getTypeSize(Context->getSizeType()), 4);
- QualType IntArrayType
- = Context->getConstantArrayType(Context->IntTy,
- Size, ArrayType::Normal, 0);
+ QualType IntArrayType =
+ Context->getConstantArrayType(Context->IntTy, Size, ArrayType::Normal, 0);
return Context->buildImplicitTypedef(IntArrayType, "__builtin_va_list");
}
@@ -6567,7 +6957,7 @@ ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
QualifiedTemplateName *QTN =
QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
if (!QTN) {
- QTN = new (*this, llvm::alignOf<QualifiedTemplateName>())
+ QTN = new (*this, alignof(QualifiedTemplateName))
QualifiedTemplateName(NNS, TemplateKeyword, Template);
QualifiedTemplateNames.InsertNode(QTN, InsertPos);
}
@@ -6595,11 +6985,11 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
if (CanonNNS == NNS) {
- QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ QTN = new (*this, alignof(DependentTemplateName))
DependentTemplateName(NNS, Name);
} else {
TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
- QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ QTN = new (*this, alignof(DependentTemplateName))
DependentTemplateName(NNS, Name, Canon);
DependentTemplateName *CheckQTN =
DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
@@ -6631,13 +7021,13 @@ ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
if (CanonNNS == NNS) {
- QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ QTN = new (*this, alignof(DependentTemplateName))
DependentTemplateName(NNS, Operator);
} else {
TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
- QTN = new (*this, llvm::alignOf<DependentTemplateName>())
+ QTN = new (*this, alignof(DependentTemplateName))
DependentTemplateName(NNS, Operator, Canon);
-
+
DependentTemplateName *CheckQTN
= DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
assert(!CheckQTN && "Dependent template name canonicalization broken");
@@ -7388,7 +7778,7 @@ bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
bool CompareUnqualified) {
if (getLangOpts().CPlusPlus)
return hasSameType(LHS, RHS);
-
+
return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
}
@@ -7902,21 +8292,9 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
}
case Type::Pipe:
{
- // Merge two pointer types, while trying to preserve typedef info
- QualType LHSValue = LHS->getAs<PipeType>()->getElementType();
- QualType RHSValue = RHS->getAs<PipeType>()->getElementType();
- if (Unqualified) {
- LHSValue = LHSValue.getUnqualifiedType();
- RHSValue = RHSValue.getUnqualifiedType();
- }
- QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
- Unqualified);
- if (ResultType.isNull()) return QualType();
- if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
- return LHS;
- if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
- return RHS;
- return getPipeType(ResultType);
+ assert(LHS != RHS &&
+ "Equivalent pipe types should have already been handled!");
+ return QualType();
}
}
@@ -8197,6 +8575,10 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
Type = Context.getSizeType();
break;
+ case 'w': // wchar_t.
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'w'!");
+ Type = Context.getWideCharType();
+ break;
case 'F':
Type = Context.getCFConstantStringType();
break;
@@ -8385,13 +8767,16 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
bool Variadic = (TypeStr[0] == '.');
- // We really shouldn't be making a no-proto type here, especially in C++.
- if (ArgTypes.empty() && Variadic)
+ // We really shouldn't be making a no-proto type here.
+ if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus)
return getFunctionNoProtoType(ResType, EI);
FunctionProtoType::ExtProtoInfo EPI;
EPI.ExtInfo = EI;
EPI.Variadic = Variadic;
+ if (getLangOpts().CPlusPlus && BuiltinInfo.isNoThrow(Id))
+ EPI.ExceptionSpec.Type =
+ getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
return getFunctionType(ResType, ArgTypes, EPI);
}
@@ -8576,6 +8961,8 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return !D->getDeclContext()->isDependentContext();
else if (isa<OMPDeclareReductionDecl>(D))
return !D->getDeclContext()->isDependentContext();
+ else if (isa<ImportDecl>(D))
+ return true;
else
return false;
@@ -8613,15 +9000,10 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
}
}
- GVALinkage Linkage = GetGVALinkageForFunction(FD);
-
// static, static inline, always_inline, and extern inline functions can
// always be deferred. Normal inline functions can be deferred in C99/C++.
// Implicit template instantiations can also be deferred in C++.
- if (Linkage == GVA_Internal || Linkage == GVA_AvailableExternally ||
- Linkage == GVA_DiscardableODR)
- return false;
- return true;
+ return !isDiscardableGVALinkage(GetGVALinkageForFunction(FD));
}
const VarDecl *VD = cast<VarDecl>(D);
@@ -8632,9 +9014,7 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return false;
// Variables that can be needed in other TUs are required.
- GVALinkage L = GetGVALinkageForVariable(VD);
- if (L != GVA_Internal && L != GVA_AvailableExternally &&
- L != GVA_DiscardableODR)
+ if (!isDiscardableGVALinkage(GetGVALinkageForVariable(VD)))
return true;
// Variables that have destruction with side-effects are required.
@@ -8646,6 +9026,14 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
!VD->evaluateValue())
return true;
+ // Likewise, variables with tuple-like bindings are required if their
+ // bindings have side-effects.
+ if (auto *DD = dyn_cast<DecompositionDecl>(VD))
+ for (auto *BD : DD->bindings())
+ if (auto *BindingVD = BD->getHoldingVar())
+ if (DeclMustBeEmitted(BindingVD))
+ return true;
+
return false;
}
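
The two hunks above fold the three-way linkage comparisons into one predicate. A minimal sketch of the helper they rely on; the enum ordering shown is an assumption of this sketch, with the exact definition living elsewhere in the tree:

    // Sketch: assumes the discardable kinds precede the strong kinds.
    enum GVALinkage { GVA_Internal, GVA_AvailableExternally, GVA_DiscardableODR,
                      GVA_StrongODR, GVA_StrongExternal };

    inline bool isDiscardableGVALinkage(GVALinkage L) {
      return L <= GVA_DiscardableODR;  // internal / available_externally / discardable
    }
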
@@ -8785,13 +9173,14 @@ unsigned ASTContext::getStaticLocalNumber(const VarDecl *VD) const {
MangleNumberingContext &
ASTContext::getManglingNumberContext(const DeclContext *DC) {
assert(LangOpts.CPlusPlus); // We don't need mangling numbers for plain C.
- MangleNumberingContext *&MCtx = MangleNumberingContexts[DC];
+ std::unique_ptr<MangleNumberingContext> &MCtx = MangleNumberingContexts[DC];
if (!MCtx)
MCtx = createMangleNumberingContext();
return *MCtx;
}
-MangleNumberingContext *ASTContext::createMangleNumberingContext() const {
+std::unique_ptr<MangleNumberingContext>
+ASTContext::createMangleNumberingContext() const {
return ABI->createMangleNumberingContext();
}
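
With the ownership change above, the per-DeclContext map frees its numbering contexts automatically instead of requiring a manual cleanup loop. A self-contained sketch of the same pattern, with MangleNumberingContext standing in for the real class:

    #include <map>
    #include <memory>

    struct MangleNumberingContext { /* numbering state */ };

    std::map<const void *, std::unique_ptr<MangleNumberingContext>> Contexts;

    MangleNumberingContext &getContext(const void *DC) {
      std::unique_ptr<MangleNumberingContext> &Slot = Contexts[DC];
      if (!Slot)  // first request for this DeclContext
        Slot = std::make_unique<MangleNumberingContext>();
      return *Slot;  // the map retains ownership
    }
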
@@ -8808,18 +9197,6 @@ void ASTContext::addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
cast<CXXConstructorDecl>(CD->getFirstDecl()));
}
-void ASTContext::addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx, Expr *DAE) {
- ABI->addDefaultArgExprForConstructor(
- cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx, DAE);
-}
-
-Expr *ASTContext::getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx) {
- return ABI->getDefaultArgExprForConstructor(
- cast<CXXConstructorDecl>(CD->getFirstDecl()), ParmIdx);
-}
-
void ASTContext::addTypedefNameForUnnamedTagDecl(TagDecl *TD,
TypedefNameDecl *DD) {
return ABI->addTypedefNameForUnnamedTagDecl(TD, DD);
@@ -9098,6 +9475,16 @@ ASTContext::ObjCMethodsAreEqual(const ObjCMethodDecl *MethodDecl,
}
+uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
+ unsigned AS;
+ if (QT->getUnqualifiedDesugaredType()->isNullPtrType())
+ AS = 0;
+ else
+ AS = QT->getPointeeType().getAddressSpace();
+
+ return getTargetInfo().getNullPointerValue(AS);
+}
+
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index 0f5a8b5ae892..03e6115a0dba 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -20,7 +20,6 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -937,6 +936,9 @@ class TemplateDiff {
++(*this);
}
+ /// Return true if the iterator is non-singular.
+ bool isValid() const { return TST; }
+
/// isEnd - Returns true if the iterator is one past the end.
bool isEnd() const {
assert(TST && "InternalIterator is invalid with a null TST.");
@@ -996,21 +998,21 @@ class TemplateDiff {
}
};
- bool UseDesugaredIterator;
InternalIterator SugaredIterator;
InternalIterator DesugaredIterator;
public:
TSTiterator(ASTContext &Context, const TemplateSpecializationType *TST)
- : UseDesugaredIterator(TST->isSugared() && !TST->isTypeAlias()),
- SugaredIterator(TST),
+ : SugaredIterator(TST),
DesugaredIterator(
- GetTemplateSpecializationType(Context, TST->desugar())) {}
+ (TST->isSugared() && !TST->isTypeAlias())
+ ? GetTemplateSpecializationType(Context, TST->desugar())
+ : nullptr) {}
/// &operator++ - Increment the iterator to the next template argument.
TSTiterator &operator++() {
++SugaredIterator;
- if (UseDesugaredIterator)
+ if (DesugaredIterator.isValid())
++DesugaredIterator;
return *this;
}
@@ -1033,12 +1035,12 @@ class TemplateDiff {
/// hasDesugaredTA - Returns true if there is another TemplateArgument
/// available.
bool hasDesugaredTA() const {
- return UseDesugaredIterator && !DesugaredIterator.isEnd();
+ return DesugaredIterator.isValid() && !DesugaredIterator.isEnd();
}
/// getDesugaredTA - Returns the desugared TemplateArgument.
reference getDesugaredTA() const {
- assert(UseDesugaredIterator &&
+ assert(DesugaredIterator.isValid() &&
"Desugared TemplateArgument should not be used.");
return *DesugaredIterator;
}
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 872ba356a9b2..62261ccc905b 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -428,6 +428,8 @@ namespace {
void VisitFunctionDecl(const FunctionDecl *D);
void VisitFieldDecl(const FieldDecl *D);
void VisitVarDecl(const VarDecl *D);
+ void VisitDecompositionDecl(const DecompositionDecl *D);
+ void VisitBindingDecl(const BindingDecl *D);
void VisitFileScopeAsmDecl(const FileScopeAsmDecl *D);
void VisitImportDecl(const ImportDecl *D);
void VisitPragmaCommentDecl(const PragmaCommentDecl *D);
@@ -515,6 +517,8 @@ namespace {
void VisitFloatingLiteral(const FloatingLiteral *Node);
void VisitStringLiteral(const StringLiteral *Str);
void VisitInitListExpr(const InitListExpr *ILE);
+ void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *ILE);
+ void VisitArrayInitIndexExpr(const ArrayInitIndexExpr *ILE);
void VisitUnaryOperator(const UnaryOperator *Node);
void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Node);
void VisitMemberExpr(const MemberExpr *Node);
@@ -543,6 +547,8 @@ namespace {
dumpDecl(Node->getLambdaClass());
}
void VisitSizeOfPackExpr(const SizeOfPackExpr *Node);
+ void
+ VisitCXXDependentScopeMemberExpr(const CXXDependentScopeMemberExpr *Node);
// ObjC
void VisitObjCAtCatchStmt(const ObjCAtCatchStmt *Node);
@@ -1133,8 +1139,15 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (D->isPure())
OS << " pure";
- else if (D->isDeletedAsWritten())
+ if (D->isDefaulted()) {
+ OS << " default";
+ if (D->isDeleted())
+ OS << "_delete";
+ }
+ if (D->isDeletedAsWritten())
OS << " delete";
+ if (D->isTrivial())
+ OS << " trivial";
if (const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
@@ -1153,11 +1166,6 @@ void ASTDumper::VisitFunctionDecl(const FunctionDecl *D) {
D->getTemplateSpecializationInfo())
dumpTemplateArgumentList(*FTSI->TemplateArguments);
- for (ArrayRef<NamedDecl *>::iterator
- I = D->getDeclsInPrototypeScope().begin(),
- E = D->getDeclsInPrototypeScope().end(); I != E; ++I)
- dumpDecl(*I);
-
if (!D->param_begin() && D->getNumParams())
dumpChild([=] { OS << "<<NULL params x " << D->getNumParams() << ">>"; });
else
@@ -1217,6 +1225,19 @@ void ASTDumper::VisitVarDecl(const VarDecl *D) {
}
}
+void ASTDumper::VisitDecompositionDecl(const DecompositionDecl *D) {
+ VisitVarDecl(D);
+ for (auto *B : D->bindings())
+ dumpDecl(B);
+}
+
+void ASTDumper::VisitBindingDecl(const BindingDecl *D) {
+ dumpName(D);
+ dumpType(D->getType());
+ if (auto *E = D->getBinding())
+ dumpStmt(E);
+}
+
void ASTDumper::VisitFileScopeAsmDecl(const FileScopeAsmDecl *D) {
dumpStmt(D->getAsmString());
}
@@ -2005,6 +2026,14 @@ void ASTDumper::VisitInitListExpr(const InitListExpr *ILE) {
}
}
+void ASTDumper::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
+ VisitExpr(E);
+}
+
+void ASTDumper::VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
+ VisitExpr(E);
+}
+
void ASTDumper::VisitUnaryOperator(const UnaryOperator *Node) {
VisitExpr(Node);
OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
@@ -2179,6 +2208,11 @@ void ASTDumper::VisitSizeOfPackExpr(const SizeOfPackExpr *Node) {
dumpTemplateArgument(A);
}
+void ASTDumper::VisitCXXDependentScopeMemberExpr(
+ const CXXDependentScopeMemberExpr *Node) {
+ VisitExpr(Node);
+ OS << " " << (Node->isArrow() ? "->" : ".") << Node->getMember();
+}
//===----------------------------------------------------------------------===//
// Obj-C Expressions
@@ -2451,12 +2485,18 @@ void QualType::dump(const char *msg) const {
dump();
}
-LLVM_DUMP_METHOD void QualType::dump() const {
- ASTDumper Dumper(llvm::errs(), nullptr, nullptr);
+LLVM_DUMP_METHOD void QualType::dump() const { dump(llvm::errs()); }
+
+LLVM_DUMP_METHOD void QualType::dump(llvm::raw_ostream &OS) const {
+ ASTDumper Dumper(OS, nullptr, nullptr);
Dumper.dumpTypeAsChild(*this);
}
-LLVM_DUMP_METHOD void Type::dump() const { QualType(this, 0).dump(); }
+LLVM_DUMP_METHOD void Type::dump() const { dump(llvm::errs()); }
+
+LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS) const {
+ QualType(this, 0).dump(OS);
+}
//===----------------------------------------------------------------------===//
// Decl method implementations
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index bc1f9f96a06b..67e96ea828bd 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -39,7 +39,9 @@ namespace clang {
// Importing types
QualType VisitType(const Type *T);
+ QualType VisitAtomicType(const AtomicType *T);
QualType VisitBuiltinType(const BuiltinType *T);
+ QualType VisitDecayedType(const DecayedType *T);
QualType VisitComplexType(const ComplexType *T);
QualType VisitPointerType(const PointerType *T);
QualType VisitBlockPointerType(const BlockPointerType *T);
@@ -88,6 +90,8 @@ namespace clang {
DeclarationNameInfo& To);
void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+ bool ImportCastPath(CastExpr *E, CXXCastPath &Path);
+
typedef DesignatedInitExpr::Designator Designator;
Designator ImportDesignator(const Designator &D);
@@ -123,6 +127,8 @@ namespace clang {
TemplateParameterList *ImportTemplateParameterList(
TemplateParameterList *Params);
TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
+ TemplateArgumentLoc ImportTemplateArgumentLoc(
+ const TemplateArgumentLoc &TALoc, bool &Error);
bool ImportTemplateArguments(const TemplateArgument *FromArgs,
unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs);
@@ -136,6 +142,7 @@ namespace clang {
bool IsStructuralMatch(VarTemplateDecl *From, VarTemplateDecl *To);
Decl *VisitDecl(Decl *D);
Decl *VisitAccessSpecDecl(AccessSpecDecl *D);
+ Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
Decl *VisitNamespaceDecl(NamespaceDecl *D);
Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
@@ -152,6 +159,7 @@ namespace clang {
Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
Decl *VisitFieldDecl(FieldDecl *D);
Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ Decl *VisitFriendDecl(FriendDecl *D);
Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
Decl *VisitVarDecl(VarDecl *D);
Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
@@ -242,16 +250,32 @@ namespace clang {
Expr *VisitConditionalOperator(ConditionalOperator *E);
Expr *VisitBinaryConditionalOperator(BinaryConditionalOperator *E);
Expr *VisitOpaqueValueExpr(OpaqueValueExpr *E);
+ Expr *VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E);
+ Expr *VisitExpressionTraitExpr(ExpressionTraitExpr *E);
+ Expr *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
- Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
+ Expr *VisitExplicitCastExpr(ExplicitCastExpr *E);
+ Expr *VisitOffsetOfExpr(OffsetOfExpr *OE);
+ Expr *VisitCXXThrowExpr(CXXThrowExpr *E);
+ Expr *VisitCXXNoexceptExpr(CXXNoexceptExpr *E);
+ Expr *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E);
+ Expr *VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ Expr *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ Expr *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE);
+ Expr *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
+ Expr *VisitCXXNewExpr(CXXNewExpr *CE);
+ Expr *VisitCXXDeleteExpr(CXXDeleteExpr *E);
Expr *VisitCXXConstructExpr(CXXConstructExpr *E);
Expr *VisitCXXMemberCallExpr(CXXMemberCallExpr *E);
+ Expr *VisitExprWithCleanups(ExprWithCleanups *EWC);
Expr *VisitCXXThisExpr(CXXThisExpr *E);
Expr *VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E);
Expr *VisitMemberExpr(MemberExpr *E);
Expr *VisitCallExpr(CallExpr *E);
Expr *VisitInitListExpr(InitListExpr *E);
+ Expr *VisitArrayInitLoopExpr(ArrayInitLoopExpr *E);
+ Expr *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E);
Expr *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E);
Expr *VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
@@ -272,13 +296,26 @@ namespace clang {
bool Failed = false;
std::transform(Ibegin, Iend, Obegin,
[&ImporterRef, &Failed](ItemT *From) -> ItemT * {
- ItemT *To = ImporterRef.Import(From);
+ ItemT *To = cast_or_null<ItemT>(
+ ImporterRef.Import(From));
if (!To && From)
Failed = true;
return To;
});
return Failed;
}
+
+ template<typename InContainerTy, typename OutContainerTy>
+ bool ImportContainerChecked(const InContainerTy &InContainer,
+ OutContainerTy &OutContainer) {
+ return ImportArrayChecked(InContainer.begin(), InContainer.end(),
+ OutContainer.begin());
+ }
+
+ template<typename InContainerTy, typename OIter>
+ bool ImportArrayChecked(const InContainerTy &InContainer, OIter Obegin) {
+ return ImportArrayChecked(InContainer.begin(), InContainer.end(), Obegin);
+ }
};
}
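
The new ImportContainerChecked overloads above are thin wrappers over the existing ImportArrayChecked core. A standalone sketch of that loop, where importOne is a placeholder for ASTImporter::Import:

    #include <algorithm>

    template <typename T> T *importOne(T *From);  // assumed importer hook

    template <typename ItemT, typename OIter>
    bool importArrayChecked(ItemT **Begin, ItemT **End, OIter Out) {
      bool Failed = false;
      std::transform(Begin, End, Out, [&Failed](ItemT *From) -> ItemT * {
        ItemT *To = importOne(From);
        if (!To && From)  // a non-null input failed to import
          Failed = true;
        return To;
      });
      return Failed;
    }
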
@@ -897,6 +934,23 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
+ case Type::ObjCTypeParam: {
+ const ObjCTypeParamType *Obj1 = cast<ObjCTypeParamType>(T1);
+ const ObjCTypeParamType *Obj2 = cast<ObjCTypeParamType>(T2);
+ if (!IsStructurallyEquivalent(Context, Obj1->getDecl(),
+ Obj2->getDecl()))
+ return false;
+
+ if (Obj1->getNumProtocols() != Obj2->getNumProtocols())
+ return false;
+ for (unsigned I = 0, N = Obj1->getNumProtocols(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getProtocol(I),
+ Obj2->getProtocol(I)))
+ return false;
+ }
+ break;
+ }
case Type::ObjCObject: {
const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1);
const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2);
@@ -1549,6 +1603,14 @@ QualType ASTNodeImporter::VisitType(const Type *T) {
return QualType();
}
+QualType ASTNodeImporter::VisitAtomicType(const AtomicType *T) {
+  QualType UnderlyingType = Importer.Import(T->getValueType());
+  if (UnderlyingType.isNull())
+    return QualType();
+
+  return Importer.getToContext().getAtomicType(UnderlyingType);
+}
REPLACED-BELOW
+
QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
switch (T->getKind()) {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
@@ -1594,6 +1656,14 @@ QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
llvm_unreachable("Invalid BuiltinType Kind!");
}
+QualType ASTNodeImporter::VisitDecayedType(const DecayedType *T) {
+ QualType OrigT = Importer.Import(T->getOriginalType());
+ if (OrigT.isNull())
+ return QualType();
+
+ return Importer.getToContext().getDecayedType(OrigT);
+}
+
QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
QualType ToElementType = Importer.Import(T->getElementType());
if (ToElementType.isNull())
@@ -2250,23 +2320,25 @@ bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
TemplateParameterList *Params) {
- SmallVector<NamedDecl *, 4> ToParams;
- ToParams.reserve(Params->size());
- for (TemplateParameterList::iterator P = Params->begin(),
- PEnd = Params->end();
- P != PEnd; ++P) {
- Decl *To = Importer.Import(*P);
- if (!To)
+ SmallVector<NamedDecl *, 4> ToParams(Params->size());
+ if (ImportContainerChecked(*Params, ToParams))
+ return nullptr;
+
+ Expr *ToRequiresClause;
+ if (Expr *const R = Params->getRequiresClause()) {
+ ToRequiresClause = Importer.Import(R);
+ if (!ToRequiresClause)
return nullptr;
-
- ToParams.push_back(cast<NamedDecl>(To));
+ } else {
+ ToRequiresClause = nullptr;
}
-
+
return TemplateParameterList::Create(Importer.getToContext(),
Importer.Import(Params->getTemplateLoc()),
Importer.Import(Params->getLAngleLoc()),
ToParams,
- Importer.Import(Params->getRAngleLoc()));
+ Importer.Import(Params->getRAngleLoc()),
+ ToRequiresClause);
}
TemplateArgument
@@ -2340,6 +2412,31 @@ ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
llvm_unreachable("Invalid template argument kind");
}
+TemplateArgumentLoc ASTNodeImporter::ImportTemplateArgumentLoc(
+ const TemplateArgumentLoc &TALoc, bool &Error) {
+ Error = false;
+ TemplateArgument Arg = ImportTemplateArgument(TALoc.getArgument());
+ TemplateArgumentLocInfo FromInfo = TALoc.getLocInfo();
+ TemplateArgumentLocInfo ToInfo;
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ Expr *E = Importer.Import(FromInfo.getAsExpr());
+ ToInfo = TemplateArgumentLocInfo(E);
+ if (!E)
+ Error = true;
+ } else if (Arg.getKind() == TemplateArgument::Type) {
+ if (TypeSourceInfo *TSI = Importer.Import(FromInfo.getAsTypeSourceInfo()))
+ ToInfo = TemplateArgumentLocInfo(TSI);
+ else
+ Error = true;
+ } else {
+ ToInfo = TemplateArgumentLocInfo(
+ Importer.Import(FromInfo.getTemplateQualifierLoc()),
+ Importer.Import(FromInfo.getTemplateNameLoc()),
+ Importer.Import(FromInfo.getTemplateEllipsisLoc()));
+ }
+ return TemplateArgumentLoc(Arg, ToInfo);
+}
+
bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
unsigned NumFromArgs,
SmallVectorImpl<TemplateArgument> &ToArgs) {
@@ -2454,6 +2551,35 @@ Decl *ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
return accessSpecDecl;
}
+Decl *ASTNodeImporter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return nullptr;
+
+ DeclContext *LexicalDC = DC;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ Expr *AssertExpr = Importer.Import(D->getAssertExpr());
+ if (!AssertExpr)
+ return nullptr;
+
+ StringLiteral *FromMsg = D->getMessage();
+ StringLiteral *ToMsg = cast_or_null<StringLiteral>(Importer.Import(FromMsg));
+ if (!ToMsg && FromMsg)
+ return nullptr;
+
+ StaticAssertDecl *ToD = StaticAssertDecl::Create(
+ Importer.getToContext(), DC, Loc, AssertExpr, ToMsg,
+ Importer.Import(D->getRParenLoc()), D->isFailed());
+
+ ToD->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToD);
+ Importer.Imported(D, ToD);
+ return ToD;
+}
+
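Note the null-tolerant message import above: since C++17 the message is optional, so ToMsg may legitimately be null while FromMsg is too. Both source forms, for reference:

    static_assert(sizeof(int) >= 2, "int too small");  // message present
    static_assert(sizeof(int) >= 2);                   // C++17: no message
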
Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
// Import the major distinguishing characteristics of this namespace.
DeclContext *DC, *LexicalDC;
@@ -3303,6 +3429,70 @@ Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
return ToIndirectField;
}
+Decl *ASTNodeImporter::VisitFriendDecl(FriendDecl *D) {
+ // Import the major distinguishing characteristics of a declaration.
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ DeclContext *LexicalDC = D->getDeclContext() == D->getLexicalDeclContext()
+ ? DC : Importer.ImportContext(D->getLexicalDeclContext());
+ if (!DC || !LexicalDC)
+ return nullptr;
+
+ // Determine whether we've already imported this decl.
+ // FriendDecl is not a NamedDecl so we cannot use localUncachedLookup.
+ auto *RD = cast<CXXRecordDecl>(DC);
+ FriendDecl *ImportedFriend = RD->getFirstFriend();
+ StructuralEquivalenceContext Context(
+ Importer.getFromContext(), Importer.getToContext(),
+ Importer.getNonEquivalentDecls(), false, false);
+
+ while (ImportedFriend) {
+ if (D->getFriendDecl() && ImportedFriend->getFriendDecl()) {
+ if (Context.IsStructurallyEquivalent(D->getFriendDecl(),
+ ImportedFriend->getFriendDecl()))
+ return Importer.Imported(D, ImportedFriend);
+
+ } else if (D->getFriendType() && ImportedFriend->getFriendType()) {
+ if (Importer.IsStructurallyEquivalent(
+ D->getFriendType()->getType(),
+ ImportedFriend->getFriendType()->getType(), true))
+ return Importer.Imported(D, ImportedFriend);
+ }
+ ImportedFriend = ImportedFriend->getNextFriend();
+ }
+
+ // Not found. Create it.
+ FriendDecl::FriendUnion ToFU;
+ if (NamedDecl *FriendD = D->getFriendDecl())
+ ToFU = cast_or_null<NamedDecl>(Importer.Import(FriendD));
+ else
+ ToFU = Importer.Import(D->getFriendType());
+ if (!ToFU)
+ return nullptr;
+
+ SmallVector<TemplateParameterList *, 1> ToTPLists(D->NumTPLists);
+ TemplateParameterList **FromTPLists =
+ D->getTrailingObjects<TemplateParameterList *>();
+ for (unsigned I = 0; I < D->NumTPLists; I++) {
+ TemplateParameterList *List = ImportTemplateParameterList(FromTPLists[I]);
+ if (!List)
+ return nullptr;
+ ToTPLists[I] = List;
+ }
+
+ FriendDecl *FrD = FriendDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocation()),
+ ToFU, Importer.Import(D->getFriendLoc()),
+ ToTPLists);
+
+ Importer.Imported(D, FrD);
+ RD->pushFriendDecl(FrD);
+
+ FrD->setAccess(D->getAccess());
+ FrD->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(FrD);
+ return FrD;
+}
+
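The structural-equivalence walk above dedups both alternatives a FriendDecl::FriendUnion can hold. The two source forms, for reference:

    struct S;
    struct T {
      friend struct S;   // friend type (getFriendType())
      friend void f(T);  // friend declaration (getFriendDecl())
    };
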
Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
// Import the major distinguishing characteristics of an ivar.
DeclContext *DC, *LexicalDC;
@@ -4859,11 +5049,10 @@ Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
}
SmallVector<Expr *, 4> Exprs(S->getNumOutputs() + S->getNumInputs());
- if (ImportArrayChecked(S->begin_outputs(), S->end_outputs(), Exprs.begin()))
+ if (ImportContainerChecked(S->outputs(), Exprs))
return nullptr;
- if (ImportArrayChecked(S->begin_inputs(), S->end_inputs(),
- Exprs.begin() + S->getNumOutputs()))
+ if (ImportArrayChecked(S->inputs(), Exprs.begin() + S->getNumOutputs()))
return nullptr;
StringLiteral *AsmStr = cast_or_null<StringLiteral>(
@@ -4906,8 +5095,8 @@ Stmt *ASTNodeImporter::VisitNullStmt(NullStmt *S) {
Stmt *ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
llvm::SmallVector<Stmt *, 8> ToStmts(S->size());
-
- if (ImportArrayChecked(S->body_begin(), S->body_end(), ToStmts.begin()))
+
+ if (ImportContainerChecked(S->body(), ToStmts))
return nullptr;
SourceLocation ToLBraceLoc = Importer.Import(S->getLBracLoc());
@@ -5361,7 +5550,7 @@ Expr *ASTNodeImporter::VisitGNUNullExpr(GNUNullExpr *E) {
return nullptr;
return new (Importer.getToContext()) GNUNullExpr(
- T, Importer.Import(E->getExprLoc()));
+ T, Importer.Import(E->getLocStart()));
}
Expr *ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
@@ -5375,7 +5564,7 @@ Expr *ASTNodeImporter::VisitPredefinedExpr(PredefinedExpr *E) {
return nullptr;
return new (Importer.getToContext()) PredefinedExpr(
- Importer.Import(E->getExprLoc()), T, E->getIdentType(), SL);
+ Importer.Import(E->getLocStart()), T, E->getIdentType(), SL);
}
Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
@@ -5394,6 +5583,20 @@ Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
if (T.isNull())
return nullptr;
+
+ TemplateArgumentListInfo ToTAInfo;
+ TemplateArgumentListInfo *ResInfo = nullptr;
+ if (E->hasExplicitTemplateArgs()) {
+ for (const auto &FromLoc : E->template_arguments()) {
+ bool Error = false;
+ TemplateArgumentLoc ToTALoc = ImportTemplateArgumentLoc(FromLoc, Error);
+ if (Error)
+ return nullptr;
+ ToTAInfo.addArgument(ToTALoc);
+ }
+ ResInfo = &ToTAInfo;
+ }
+
DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
Importer.Import(E->getQualifierLoc()),
Importer.Import(E->getTemplateKeywordLoc()),
@@ -5401,8 +5604,7 @@ Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
E->refersToEnclosingVariableOrCapture(),
Importer.Import(E->getLocation()),
T, E->getValueKind(),
- FoundD,
- /*FIXME:TemplateArgs=*/nullptr);
+ FoundD, ResInfo);
if (E->hadMultipleCandidates())
DRE->setHadMultipleCandidates(true);
return DRE;
@@ -5411,7 +5613,7 @@ Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
Expr *ASTNodeImporter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
- return NULL;
+ return nullptr;
return new (Importer.getToContext()) ImplicitValueInitExpr(T);
}
@@ -5580,8 +5782,7 @@ Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
Expr *ASTNodeImporter::VisitParenListExpr(ParenListExpr *E) {
SmallVector<Expr *, 4> Exprs(E->getNumExprs());
- if (ImportArrayChecked(
- E->getExprs(), E->getExprs() + E->getNumExprs(), Exprs.begin()))
+ if (ImportContainerChecked(E->exprs(), Exprs))
return nullptr;
return new (Importer.getToContext()) ParenListExpr(
@@ -5719,6 +5920,38 @@ Expr *ASTNodeImporter::VisitBinaryConditionalOperator(
T, E->getValueKind(), E->getObjectKind());
}
+Expr *ASTNodeImporter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ TypeSourceInfo *ToQueried = Importer.Import(E->getQueriedTypeSourceInfo());
+ if (!ToQueried)
+ return nullptr;
+
+ Expr *Dim = Importer.Import(E->getDimensionExpression());
+ if (!Dim && E->getDimensionExpression())
+ return nullptr;
+
+ return new (Importer.getToContext()) ArrayTypeTraitExpr(
+ Importer.Import(E->getLocStart()), E->getTrait(), ToQueried,
+ E->getValue(), Dim, Importer.Import(E->getLocEnd()), T);
+}
+
+Expr *ASTNodeImporter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *ToQueried = Importer.Import(E->getQueriedExpression());
+ if (!ToQueried)
+ return nullptr;
+
+ return new (Importer.getToContext()) ExpressionTraitExpr(
+ Importer.Import(E->getLocStart()), E->getTrait(), ToQueried,
+ E->getValue(), Importer.Import(E->getLocEnd()), T);
+}
+
Expr *ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
@@ -5729,10 +5962,28 @@ Expr *ASTNodeImporter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
return nullptr;
return new (Importer.getToContext()) OpaqueValueExpr(
- Importer.Import(E->getExprLoc()), T, E->getValueKind(),
+ Importer.Import(E->getLocation()), T, E->getValueKind(),
E->getObjectKind(), SourceExpr);
}
+Expr *ASTNodeImporter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *ToLHS = Importer.Import(E->getLHS());
+ if (!ToLHS)
+ return nullptr;
+
+ Expr *ToRHS = Importer.Import(E->getRHS());
+ if (!ToRHS)
+ return nullptr;
+
+ return new (Importer.getToContext()) ArraySubscriptExpr(
+ ToLHS, ToRHS, T, E->getValueKind(), E->getObjectKind(),
+ Importer.Import(E->getRBracketLoc()));
+}
+
Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
@@ -5763,11 +6014,14 @@ Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
E->isFPContractable());
}
-static bool ImportCastPath(CastExpr *E, CXXCastPath &Path) {
- if (E->path_empty()) return false;
-
- // TODO: import cast paths
- return true;
+bool ASTNodeImporter::ImportCastPath(CastExpr *CE, CXXCastPath &Path) {
+ for (auto I = CE->path_begin(), E = CE->path_end(); I != E; ++I) {
+ if (CXXBaseSpecifier *Spec = Importer.Import(*I))
+ Path.push_back(Spec);
+ else
+ return true;
+ }
+ return false;
}
Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
@@ -5787,7 +6041,7 @@ Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
SubExpr, &BasePath, E->getValueKind());
}
-Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+Expr *ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
return nullptr;
@@ -5804,11 +6058,320 @@ Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
if (ImportCastPath(E, BasePath))
return nullptr;
- return CStyleCastExpr::Create(Importer.getToContext(), T,
- E->getValueKind(), E->getCastKind(),
- SubExpr, &BasePath, TInfo,
- Importer.Import(E->getLParenLoc()),
- Importer.Import(E->getRParenLoc()));
+ switch (E->getStmtClass()) {
+ case Stmt::CStyleCastExprClass: {
+ CStyleCastExpr *CCE = cast<CStyleCastExpr>(E);
+ return CStyleCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
+ SubExpr, &BasePath, TInfo,
+ Importer.Import(CCE->getLParenLoc()),
+ Importer.Import(CCE->getRParenLoc()));
+ }
+
+ case Stmt::CXXFunctionalCastExprClass: {
+ CXXFunctionalCastExpr *FCE = cast<CXXFunctionalCastExpr>(E);
+ return CXXFunctionalCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), TInfo,
+ E->getCastKind(), SubExpr, &BasePath,
+ Importer.Import(FCE->getLParenLoc()),
+ Importer.Import(FCE->getRParenLoc()));
+ }
+
+ case Stmt::ObjCBridgedCastExprClass: {
+ ObjCBridgedCastExpr *OCE = cast<ObjCBridgedCastExpr>(E);
+ return new (Importer.getToContext()) ObjCBridgedCastExpr(
+ Importer.Import(OCE->getLParenLoc()), OCE->getBridgeKind(),
+ E->getCastKind(), Importer.Import(OCE->getBridgeKeywordLoc()),
+ TInfo, SubExpr);
+ }
+ default:
+    break; // not one of these; handled below as a named cast
+ }
+
+ CXXNamedCastExpr *Named = cast<CXXNamedCastExpr>(E);
+ SourceLocation ExprLoc = Importer.Import(Named->getOperatorLoc()),
+ RParenLoc = Importer.Import(Named->getRParenLoc());
+ SourceRange Brackets = Importer.Import(Named->getAngleBrackets());
+
+ switch (E->getStmtClass()) {
+ case Stmt::CXXStaticCastExprClass:
+ return CXXStaticCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
+ SubExpr, &BasePath, TInfo,
+ ExprLoc, RParenLoc, Brackets);
+
+ case Stmt::CXXDynamicCastExprClass:
+ return CXXDynamicCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
+ SubExpr, &BasePath, TInfo,
+ ExprLoc, RParenLoc, Brackets);
+
+ case Stmt::CXXReinterpretCastExprClass:
+ return CXXReinterpretCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
+ SubExpr, &BasePath, TInfo,
+ ExprLoc, RParenLoc, Brackets);
+
+ case Stmt::CXXConstCastExprClass:
+ return CXXConstCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), SubExpr, TInfo, ExprLoc,
+ RParenLoc, Brackets);
+ default:
+ llvm_unreachable("Cast expression of unsupported type!");
+ return nullptr;
+ }
+}
+
+Expr *ASTNodeImporter::VisitOffsetOfExpr(OffsetOfExpr *OE) {
+ QualType T = Importer.Import(OE->getType());
+ if (T.isNull())
+ return nullptr;
+
+ SmallVector<OffsetOfNode, 4> Nodes;
+ for (int I = 0, E = OE->getNumComponents(); I < E; ++I) {
+ const OffsetOfNode &Node = OE->getComponent(I);
+
+ switch (Node.getKind()) {
+ case OffsetOfNode::Array:
+ Nodes.push_back(OffsetOfNode(Importer.Import(Node.getLocStart()),
+ Node.getArrayExprIndex(),
+ Importer.Import(Node.getLocEnd())));
+ break;
+
+ case OffsetOfNode::Base: {
+ CXXBaseSpecifier *BS = Importer.Import(Node.getBase());
+ if (!BS && Node.getBase())
+ return nullptr;
+ Nodes.push_back(OffsetOfNode(BS));
+ break;
+ }
+ case OffsetOfNode::Field: {
+ FieldDecl *FD = cast_or_null<FieldDecl>(Importer.Import(Node.getField()));
+ if (!FD)
+ return nullptr;
+ Nodes.push_back(OffsetOfNode(Importer.Import(Node.getLocStart()), FD,
+ Importer.Import(Node.getLocEnd())));
+ break;
+ }
+ case OffsetOfNode::Identifier: {
+ IdentifierInfo *ToII = Importer.Import(Node.getFieldName());
+ if (!ToII)
+ return nullptr;
+ Nodes.push_back(OffsetOfNode(Importer.Import(Node.getLocStart()), ToII,
+ Importer.Import(Node.getLocEnd())));
+ break;
+ }
+ }
+ }
+
+ SmallVector<Expr *, 4> Exprs(OE->getNumExpressions());
+ for (int I = 0, E = OE->getNumExpressions(); I < E; ++I) {
+ Expr *ToIndexExpr = Importer.Import(OE->getIndexExpr(I));
+ if (!ToIndexExpr)
+ return nullptr;
+ Exprs[I] = ToIndexExpr;
+ }
+
+ TypeSourceInfo *TInfo = Importer.Import(OE->getTypeSourceInfo());
+ if (!TInfo && OE->getTypeSourceInfo())
+ return nullptr;
+
+ return OffsetOfExpr::Create(Importer.getToContext(), T,
+ Importer.Import(OE->getOperatorLoc()),
+ TInfo, Nodes, Exprs,
+ Importer.Import(OE->getRParenLoc()));
+}
+
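The importer above rebuilds every OffsetOfNode kind (array index, base, field, identifier). The classic source form producing a field component, for reference:

    #include <cstddef>

    struct Point { int x, y; };
    constexpr std::size_t OffY = offsetof(Point, y);  // one Field component
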
+Expr *ASTNodeImporter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *Operand = Importer.Import(E->getOperand());
+ if (!Operand)
+ return nullptr;
+
+ CanThrowResult CanThrow;
+ if (E->isValueDependent())
+ CanThrow = CT_Dependent;
+ else
+ CanThrow = E->getValue() ? CT_Can : CT_Cannot;
+
+ return new (Importer.getToContext()) CXXNoexceptExpr(
+ T, Operand, CanThrow,
+ Importer.Import(E->getLocStart()), Importer.Import(E->getLocEnd()));
+}
+
+Expr *ASTNodeImporter::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr && E->getSubExpr())
+ return nullptr;
+
+ return new (Importer.getToContext()) CXXThrowExpr(
+ SubExpr, T, Importer.Import(E->getThrowLoc()),
+ E->isThrownVariableInScope());
+}
+
+Expr *ASTNodeImporter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ ParmVarDecl *Param = cast_or_null<ParmVarDecl>(
+ Importer.Import(E->getParam()));
+ if (!Param)
+ return nullptr;
+
+ return CXXDefaultArgExpr::Create(
+ Importer.getToContext(), Importer.Import(E->getUsedLocation()), Param);
+}
+
+Expr *ASTNodeImporter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ TypeSourceInfo *TypeInfo = Importer.Import(E->getTypeSourceInfo());
+ if (!TypeInfo)
+ return nullptr;
+
+ return new (Importer.getToContext()) CXXScalarValueInitExpr(
+ T, TypeInfo, Importer.Import(E->getRParenLoc()));
+}
+
+Expr *ASTNodeImporter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return nullptr;
+
+ auto *Dtor = cast_or_null<CXXDestructorDecl>(
+ Importer.Import(const_cast<CXXDestructorDecl *>(
+ E->getTemporary()->getDestructor())));
+ if (!Dtor)
+ return nullptr;
+
+ ASTContext &ToCtx = Importer.getToContext();
+ CXXTemporary *Temp = CXXTemporary::Create(ToCtx, Dtor);
+ return CXXBindTemporaryExpr::Create(ToCtx, Temp, SubExpr);
+}
+
+Expr *ASTNodeImporter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *CE) {
+ QualType T = Importer.Import(CE->getType());
+ if (T.isNull())
+ return nullptr;
+
+ SmallVector<Expr *, 8> Args(CE->getNumArgs());
+ if (ImportContainerChecked(CE->arguments(), Args))
+ return nullptr;
+
+ auto *Ctor = cast_or_null<CXXConstructorDecl>(
+ Importer.Import(CE->getConstructor()));
+ if (!Ctor)
+ return nullptr;
+
+ return CXXTemporaryObjectExpr::Create(
+ Importer.getToContext(), T,
+ Importer.Import(CE->getLocStart()),
+ Ctor,
+ CE->isElidable(),
+ Args,
+ CE->hadMultipleCandidates(),
+ CE->isListInitialization(),
+ CE->isStdInitListInitialization(),
+ CE->requiresZeroInitialization(),
+ CE->getConstructionKind(),
+ Importer.Import(CE->getParenOrBraceRange()));
+}
+
+Expr *
+ASTNodeImporter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ Expr *TempE = Importer.Import(E->GetTemporaryExpr());
+ if (!TempE)
+ return nullptr;
+
+ ValueDecl *ExtendedBy = cast_or_null<ValueDecl>(
+ Importer.Import(const_cast<ValueDecl *>(E->getExtendingDecl())));
+ if (!ExtendedBy && E->getExtendingDecl())
+ return nullptr;
+
+ auto *ToMTE = new (Importer.getToContext()) MaterializeTemporaryExpr(
+ T, TempE, E->isBoundToLvalueReference());
+
+ // FIXME: Should ManglingNumber get numbers associated with 'to' context?
+ ToMTE->setExtendingDecl(ExtendedBy, E->getManglingNumber());
+ return ToMTE;
+}
+
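The FIXME above concerns the mangling number carried along with the extending declaration. The canonical source pattern that creates one, for reference:

    struct W { int v; };
    const W &Ref = W{42};  // temporary materialized, lifetime extended by 'Ref'
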
+Expr *ASTNodeImporter::VisitCXXNewExpr(CXXNewExpr *CE) {
+ QualType T = Importer.Import(CE->getType());
+ if (T.isNull())
+ return nullptr;
+
+ SmallVector<Expr *, 4> PlacementArgs(CE->getNumPlacementArgs());
+ if (ImportContainerChecked(CE->placement_arguments(), PlacementArgs))
+ return nullptr;
+
+ FunctionDecl *OperatorNewDecl = cast_or_null<FunctionDecl>(
+ Importer.Import(CE->getOperatorNew()));
+ if (!OperatorNewDecl && CE->getOperatorNew())
+ return nullptr;
+
+ FunctionDecl *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
+ Importer.Import(CE->getOperatorDelete()));
+ if (!OperatorDeleteDecl && CE->getOperatorDelete())
+ return nullptr;
+
+ Expr *ToInit = Importer.Import(CE->getInitializer());
+ if (!ToInit && CE->getInitializer())
+ return nullptr;
+
+ TypeSourceInfo *TInfo = Importer.Import(CE->getAllocatedTypeSourceInfo());
+ if (!TInfo)
+ return nullptr;
+
+ Expr *ToArrSize = Importer.Import(CE->getArraySize());
+ if (!ToArrSize && CE->getArraySize())
+ return nullptr;
+
+ return new (Importer.getToContext()) CXXNewExpr(
+ Importer.getToContext(),
+ CE->isGlobalNew(),
+ OperatorNewDecl, OperatorDeleteDecl,
+ CE->passAlignment(),
+ CE->doesUsualArrayDeleteWantSize(),
+ PlacementArgs,
+ Importer.Import(CE->getTypeIdParens()),
+ ToArrSize, CE->getInitializationStyle(), ToInit, T, TInfo,
+ Importer.Import(CE->getSourceRange()),
+ Importer.Import(CE->getDirectInitRange()));
+}
+
+Expr *ASTNodeImporter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ FunctionDecl *OperatorDeleteDecl = cast_or_null<FunctionDecl>(
+ Importer.Import(E->getOperatorDelete()));
+ if (!OperatorDeleteDecl && E->getOperatorDelete())
+ return nullptr;
+
+ Expr *ToArg = Importer.Import(E->getArgument());
+ if (!ToArg && E->getArgument())
+ return nullptr;
+
+ return new (Importer.getToContext()) CXXDeleteExpr(
+ T, E->isGlobalDelete(),
+ E->isArrayForm(),
+ E->isArrayFormAsWritten(),
+ E->doesUsualArrayDeleteWantSize(),
+ OperatorDeleteDecl,
+ ToArg,
+ Importer.Import(E->getLocStart()));
}
Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
@@ -5822,8 +6385,7 @@ Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
return nullptr;
SmallVector<Expr *, 6> ToArgs(E->getNumArgs());
- if (ImportArrayChecked(E->getArgs(), E->getArgs() + E->getNumArgs(),
- ToArgs.begin()))
+ if (ImportContainerChecked(E->arguments(), ToArgs))
return nullptr;
return CXXConstructExpr::Create(Importer.getToContext(), T,
@@ -5837,6 +6399,24 @@ Expr *ASTNodeImporter::VisitCXXConstructExpr(CXXConstructExpr *E) {
Importer.Import(E->getParenOrBraceRange()));
}
+Expr *ASTNodeImporter::VisitExprWithCleanups(ExprWithCleanups *EWC) {
+ Expr *SubExpr = Importer.Import(EWC->getSubExpr());
+ if (!SubExpr && EWC->getSubExpr())
+ return nullptr;
+
+ SmallVector<ExprWithCleanups::CleanupObject, 8> Objs(EWC->getNumObjects());
+ for (unsigned I = 0, E = EWC->getNumObjects(); I < E; I++)
+ if (ExprWithCleanups::CleanupObject Obj =
+ cast_or_null<BlockDecl>(Importer.Import(EWC->getObject(I))))
+ Objs[I] = Obj;
+ else
+ return nullptr;
+
+ return ExprWithCleanups::Create(Importer.getToContext(),
+ SubExpr, EWC->cleanupsHaveSideEffects(),
+ Objs);
+}
+
Expr *ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
@@ -5847,8 +6427,7 @@ Expr *ASTNodeImporter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
return nullptr;
SmallVector<Expr *, 4> ToArgs(E->getNumArgs());
-
- if (ImportArrayChecked(E->arg_begin(), E->arg_end(), ToArgs.begin()))
+ if (ImportContainerChecked(E->arguments(), ToArgs))
return nullptr;
return new (Importer.getToContext()) CXXMemberCallExpr(
@@ -5949,8 +6528,7 @@ Expr *ASTNodeImporter::VisitInitListExpr(InitListExpr *ILE) {
return nullptr;
llvm::SmallVector<Expr *, 4> Exprs(ILE->getNumInits());
- if (ImportArrayChecked(
- ILE->getInits(), ILE->getInits() + ILE->getNumInits(), Exprs.begin()))
+ if (ImportContainerChecked(ILE->inits(), Exprs))
return nullptr;
ASTContext &ToCtx = Importer.getToContext();
@@ -5988,6 +6566,30 @@ Expr *ASTNodeImporter::VisitInitListExpr(InitListExpr *ILE) {
return To;
}
+Expr *ASTNodeImporter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
+ QualType ToType = Importer.Import(E->getType());
+ if (ToType.isNull())
+ return nullptr;
+
+ Expr *ToCommon = Importer.Import(E->getCommonExpr());
+ if (!ToCommon && E->getCommonExpr())
+ return nullptr;
+
+ Expr *ToSubExpr = Importer.Import(E->getSubExpr());
+ if (!ToSubExpr && E->getSubExpr())
+ return nullptr;
+
+ return new (Importer.getToContext())
+ ArrayInitLoopExpr(ToType, ToCommon, ToSubExpr);
+}
+
+Expr *ASTNodeImporter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ QualType ToType = Importer.Import(E->getType());
+ if (ToType.isNull())
+ return nullptr;
+ return new (Importer.getToContext()) ArrayInitIndexExpr(ToType);
+}
+
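ArrayInitLoopExpr and ArrayInitIndexExpr model per-element array copies; one source construct that should produce them is a by-value lambda capture of an array:

    int main() {
      int Arr[3] = {1, 2, 3};
      auto F = [Arr] { return Arr[0]; };  // array copy -> ArrayInitLoopExpr
      return F();
    }
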
Expr *ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
FieldDecl *ToField = llvm::dyn_cast_or_null<FieldDecl>(
Importer.Import(DIE->getField()));
@@ -6376,10 +6978,10 @@ SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
SourceManager &FromSM = FromContext.getSourceManager();
- // For now, map everything down to its spelling location, so that we
+ // For now, map everything down to its file location, so that we
// don't have to import macro expansions.
// FIXME: Import macro expansions!
- FromLoc = FromSM.getSpellingLoc(FromLoc);
+ FromLoc = FromSM.getFileLoc(FromLoc);
std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
SourceManager &ToSM = ToContext.getSourceManager();
FileID ToFileID = Import(Decomposed.first);
@@ -6478,31 +7080,27 @@ CXXCtorInitializer *ASTImporter::Import(CXXCtorInitializer *From) {
return new (ToContext)
CXXCtorInitializer(ToContext, ToTInfo, Import(From->getLParenLoc()),
ToExpr, Import(From->getRParenLoc()));
- } else if (unsigned NumArrayIndices = From->getNumArrayIndices()) {
- FieldDecl *ToField =
- llvm::cast_or_null<FieldDecl>(Import(From->getMember()));
- if (!ToField && From->getMember())
- return nullptr;
-
- SmallVector<VarDecl *, 4> ToAIs(NumArrayIndices);
-
- for (unsigned AII = 0; AII < NumArrayIndices; ++AII) {
- VarDecl *ToArrayIndex =
- dyn_cast_or_null<VarDecl>(Import(From->getArrayIndex(AII)));
- if (!ToArrayIndex && From->getArrayIndex(AII))
- return nullptr;
- }
-
- return CXXCtorInitializer::Create(
- ToContext, ToField, Import(From->getMemberLocation()),
- Import(From->getLParenLoc()), ToExpr, Import(From->getRParenLoc()),
- ToAIs.data(), NumArrayIndices);
} else {
return nullptr;
}
}
+CXXBaseSpecifier *ASTImporter::Import(const CXXBaseSpecifier *BaseSpec) {
+ auto Pos = ImportedCXXBaseSpecifiers.find(BaseSpec);
+ if (Pos != ImportedCXXBaseSpecifiers.end())
+ return Pos->second;
+
+ CXXBaseSpecifier *Imported = new (ToContext) CXXBaseSpecifier(
+ Import(BaseSpec->getSourceRange()),
+ BaseSpec->isVirtual(), BaseSpec->isBaseOfClass(),
+ BaseSpec->getAccessSpecifierAsWritten(),
+ Import(BaseSpec->getTypeSourceInfo()),
+ Import(BaseSpec->getEllipsisLoc()));
+ ImportedCXXBaseSpecifiers[BaseSpec] = Imported;
+ return Imported;
+}
+
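The base-specifier importer above memoizes results in ImportedCXXBaseSpecifiers so each specifier maps to a single imported node. The cache shape in generic form; all names here are illustrative:

    #include <unordered_map>

    template <typename K, typename V, typename MakeFn>
    V *getOrCreate(std::unordered_map<const K *, V *> &Cache, const K *Key,
                   MakeFn Make) {
      auto It = Cache.find(Key);
      if (It != Cache.end())
        return It->second;  // reuse the previously imported node
      V *Created = Make(Key);
      Cache[Key] = Created;
      return Created;
    }
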
void ASTImporter::ImportDefinition(Decl *From) {
Decl *To = Import(From);
if (!To)
diff --git a/lib/AST/ASTTypeTraits.cpp b/lib/AST/ASTTypeTraits.cpp
index 2336c98fe049..461084ce707c 100644
--- a/lib/AST/ASTTypeTraits.cpp
+++ b/lib/AST/ASTTypeTraits.cpp
@@ -23,6 +23,7 @@ namespace ast_type_traits {
const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{ NKI_None, "<None>" },
{ NKI_None, "TemplateArgument" },
+ { NKI_None, "TemplateName" },
{ NKI_None, "NestedNameSpecifierLoc" },
{ NKI_None, "QualType" },
{ NKI_None, "TypeLoc" },
@@ -109,6 +110,8 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
const PrintingPolicy &PP) const {
if (const TemplateArgument *TA = get<TemplateArgument>())
TA->print(PP, OS);
+ else if (const TemplateName *TN = get<TemplateName>())
+ TN->print(OS, PP);
else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>())
NNS->print(OS, PP);
else if (const NestedNameSpecifierLoc *NNSL = get<NestedNameSpecifierLoc>())
@@ -132,6 +135,8 @@ void DynTypedNode::dump(llvm::raw_ostream &OS, SourceManager &SM) const {
D->dump(OS);
else if (const Stmt *S = get<Stmt>())
S->dump(OS, SM);
+ else if (const Type *T = get<Type>())
+ T->dump(OS);
else
OS << "Unable to dump values of type " << NodeKind.asStringRef() << "\n";
}
diff --git a/lib/AST/AttrImpl.cpp b/lib/AST/AttrImpl.cpp
index cb608700133c..b06b50c9b4b8 100644
--- a/lib/AST/AttrImpl.cpp
+++ b/lib/AST/AttrImpl.cpp
@@ -11,11 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/Attr.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
-#include "llvm/ADT/StringSwitch.h"
using namespace clang;
#include "clang/AST/AttrImpl.inc"
diff --git a/lib/AST/CXXABI.h b/lib/AST/CXXABI.h
index c23b9191c7ab..924ef00e8147 100644
--- a/lib/AST/CXXABI.h
+++ b/lib/AST/CXXABI.h
@@ -43,7 +43,8 @@ public:
virtual bool isNearlyEmpty(const CXXRecordDecl *RD) const = 0;
/// Returns a new mangling number context for this C++ ABI.
- virtual MangleNumberingContext *createMangleNumberingContext() const = 0;
+ virtual std::unique_ptr<MangleNumberingContext>
+ createMangleNumberingContext() const = 0;
/// Adds a mapping from class to copy constructor for this C++ ABI.
virtual void addCopyConstructorForExceptionObject(CXXRecordDecl *,
@@ -53,12 +54,6 @@ public:
virtual const CXXConstructorDecl *
getCopyConstructorForExceptionObject(CXXRecordDecl *) = 0;
- virtual void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx, Expr *DAE) = 0;
-
- virtual Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx) = 0;
-
virtual void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
TypedefNameDecl *DD) = 0;
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index 6a6ca76a0165..a97d6a22e7b3 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -16,7 +16,6 @@
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/SetVector.h"
#include <algorithm>
-#include <set>
using namespace clang;
diff --git a/lib/AST/Comment.cpp b/lib/AST/Comment.cpp
index 893bdc5c17bf..7a7d3dd8304e 100644
--- a/lib/AST/Comment.cpp
+++ b/lib/AST/Comment.cpp
@@ -7,14 +7,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Comment.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
namespace clang {
namespace comments {
@@ -114,6 +113,65 @@ bool ParagraphComment::isWhitespaceNoCache() const {
return true;
}
+static TypeLoc lookThroughTypedefOrTypeAliasLocs(TypeLoc &SrcTL) {
+ TypeLoc TL = SrcTL.IgnoreParens();
+
+ // Look through qualified types.
+ if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>())
+ return QualifiedTL.getUnqualifiedLoc();
+ // Look through pointer types.
+ if (PointerTypeLoc PointerTL = TL.getAs<PointerTypeLoc>())
+ return PointerTL.getPointeeLoc().getUnqualifiedLoc();
+ // Look through reference types.
+ if (ReferenceTypeLoc ReferenceTL = TL.getAs<ReferenceTypeLoc>())
+ return ReferenceTL.getPointeeLoc().getUnqualifiedLoc();
+ // Look through adjusted types.
+ if (AdjustedTypeLoc ATL = TL.getAs<AdjustedTypeLoc>())
+ return ATL.getOriginalLoc();
+ if (BlockPointerTypeLoc BlockPointerTL = TL.getAs<BlockPointerTypeLoc>())
+ return BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
+ if (MemberPointerTypeLoc MemberPointerTL = TL.getAs<MemberPointerTypeLoc>())
+ return MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
+ if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>())
+ return ETL.getNamedTypeLoc();
+
+ return TL;
+}
+
+static bool getFunctionTypeLoc(TypeLoc TL, FunctionTypeLoc &ResFTL) {
+ TypeLoc PrevTL;
+ while (PrevTL != TL) {
+ PrevTL = TL;
+ TL = lookThroughTypedefOrTypeAliasLocs(TL);
+ }
+
+ if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
+ ResFTL = FTL;
+ return true;
+ }
+
+ if (TemplateSpecializationTypeLoc STL =
+ TL.getAs<TemplateSpecializationTypeLoc>()) {
+ // If we have a typedef to a template specialization with exactly one
+ // template argument of a function type, this looks like std::function,
+ // boost::function, or other function wrapper. Treat these typedefs as
+ // functions.
+ if (STL.getNumArgs() != 1)
+ return false;
+ TemplateArgumentLoc MaybeFunction = STL.getArgLoc(0);
+ if (MaybeFunction.getArgument().getKind() != TemplateArgument::Type)
+ return false;
+ TypeSourceInfo *MaybeFunctionTSI = MaybeFunction.getTypeSourceInfo();
+ TypeLoc TL = MaybeFunctionTSI->getTypeLoc().getUnqualifiedLoc();
+ if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
+ ResFTL = FTL;
+ return true;
+ }
+ }
+
+ return false;
+}
+
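The extracted helper above treats single-argument function-wrapper specializations as functions, so a typedef like the following gets parameter and return documentation handling:

    #include <functional>

    /// \param a first operand
    /// \param b second operand
    /// \returns the combined value
    typedef std::function<int(int a, int b)> BinaryOp;
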
const char *ParamCommandComment::getDirectionAsString(PassDirection D) {
switch (D) {
case ParamCommandComment::In:
@@ -227,90 +285,45 @@ void DeclInfo::fill() {
case Decl::Namespace:
Kind = NamespaceKind;
break;
+ case Decl::TypeAlias:
case Decl::Typedef: {
Kind = TypedefKind;
- // If this is a typedef to something we consider a function, extract
+ // If this is a typedef / using to something we consider a function, extract
// arguments and return type.
- const TypedefDecl *TD = cast<TypedefDecl>(CommentDecl);
- const TypeSourceInfo *TSI = TD->getTypeSourceInfo();
+ const TypeSourceInfo *TSI =
+ K == Decl::Typedef
+ ? cast<TypedefDecl>(CommentDecl)->getTypeSourceInfo()
+ : cast<TypeAliasDecl>(CommentDecl)->getTypeSourceInfo();
if (!TSI)
break;
TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
- while (true) {
- TL = TL.IgnoreParens();
- // Look through qualified types.
- if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) {
- TL = QualifiedTL.getUnqualifiedLoc();
- continue;
- }
- // Look through pointer types.
- if (PointerTypeLoc PointerTL = TL.getAs<PointerTypeLoc>()) {
- TL = PointerTL.getPointeeLoc().getUnqualifiedLoc();
- continue;
- }
- // Look through reference types.
- if (ReferenceTypeLoc ReferenceTL = TL.getAs<ReferenceTypeLoc>()) {
- TL = ReferenceTL.getPointeeLoc().getUnqualifiedLoc();
- continue;
- }
- // Look through adjusted types.
- if (AdjustedTypeLoc ATL = TL.getAs<AdjustedTypeLoc>()) {
- TL = ATL.getOriginalLoc();
- continue;
- }
- if (BlockPointerTypeLoc BlockPointerTL =
- TL.getAs<BlockPointerTypeLoc>()) {
- TL = BlockPointerTL.getPointeeLoc().getUnqualifiedLoc();
- continue;
- }
- if (MemberPointerTypeLoc MemberPointerTL =
- TL.getAs<MemberPointerTypeLoc>()) {
- TL = MemberPointerTL.getPointeeLoc().getUnqualifiedLoc();
- continue;
- }
- if (ElaboratedTypeLoc ETL = TL.getAs<ElaboratedTypeLoc>()) {
- TL = ETL.getNamedTypeLoc();
- continue;
- }
- // Is this a typedef for a function type?
- if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
- Kind = FunctionKind;
- ParamVars = FTL.getParams();
- ReturnType = FTL.getReturnLoc().getType();
- break;
- }
- if (TemplateSpecializationTypeLoc STL =
- TL.getAs<TemplateSpecializationTypeLoc>()) {
- // If we have a typedef to a template specialization with exactly one
- // template argument of a function type, this looks like std::function,
- // boost::function, or other function wrapper. Treat these typedefs as
- // functions.
- if (STL.getNumArgs() != 1)
- break;
- TemplateArgumentLoc MaybeFunction = STL.getArgLoc(0);
- if (MaybeFunction.getArgument().getKind() != TemplateArgument::Type)
- break;
- TypeSourceInfo *MaybeFunctionTSI = MaybeFunction.getTypeSourceInfo();
- TypeLoc TL = MaybeFunctionTSI->getTypeLoc().getUnqualifiedLoc();
- if (FunctionTypeLoc FTL = TL.getAs<FunctionTypeLoc>()) {
- Kind = FunctionKind;
- ParamVars = FTL.getParams();
- ReturnType = FTL.getReturnLoc().getType();
- }
- break;
- }
- break;
+ FunctionTypeLoc FTL;
+ if (getFunctionTypeLoc(TL, FTL)) {
+ Kind = FunctionKind;
+ ParamVars = FTL.getParams();
+ ReturnType = FTL.getReturnLoc().getType();
}
break;
}
- case Decl::TypeAlias:
- Kind = TypedefKind;
- break;
case Decl::TypeAliasTemplate: {
const TypeAliasTemplateDecl *TAT = cast<TypeAliasTemplateDecl>(CommentDecl);
Kind = TypedefKind;
TemplateKind = Template;
TemplateParameters = TAT->getTemplateParameters();
+ TypeAliasDecl *TAD = TAT->getTemplatedDecl();
+ if (!TAD)
+ break;
+
+ const TypeSourceInfo *TSI = TAD->getTypeSourceInfo();
+ if (!TSI)
+ break;
+ TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
+ FunctionTypeLoc FTL;
+ if (getFunctionTypeLoc(TL, FTL)) {
+ Kind = FunctionKind;
+ ParamVars = FTL.getParams();
+ ReturnType = FTL.getReturnLoc().getType();
+ }
break;
}
case Decl::Enum:
diff --git a/lib/AST/CommentBriefParser.cpp b/lib/AST/CommentBriefParser.cpp
index 090b9211d4c1..eecea8fc11df 100644
--- a/lib/AST/CommentBriefParser.cpp
+++ b/lib/AST/CommentBriefParser.cpp
@@ -9,7 +9,6 @@
#include "clang/AST/CommentBriefParser.h"
#include "clang/AST/CommentCommandTraits.h"
-#include "llvm/ADT/StringSwitch.h"
namespace clang {
namespace comments {
diff --git a/lib/AST/CommentLexer.cpp b/lib/AST/CommentLexer.cpp
index 57bfef08df6e..65d0f56f09ab 100644
--- a/lib/AST/CommentLexer.cpp
+++ b/lib/AST/CommentLexer.cpp
@@ -378,15 +378,17 @@ void Lexer::lexCommentText(Token &T) {
if ((Info = Traits.getTypoCorrectCommandInfo(CommandName))) {
StringRef CorrectedName = Info->Name;
SourceLocation Loc = getSourceLocation(BufferPtr);
- SourceRange CommandRange(Loc.getLocWithOffset(1),
- getSourceLocation(TokenPtr));
+ SourceLocation EndLoc = getSourceLocation(TokenPtr);
+ SourceRange FullRange = SourceRange(Loc, EndLoc);
+ SourceRange CommandRange(Loc.getLocWithOffset(1), EndLoc);
Diag(Loc, diag::warn_correct_comment_command_name)
- << CommandName << CorrectedName
+ << FullRange << CommandName << CorrectedName
<< FixItHint::CreateReplacement(CommandRange, CorrectedName);
} else {
formTokenWithChars(T, TokenPtr, tok::unknown_command);
T.setUnknownCommandName(CommandName);
- Diag(T.getLocation(), diag::warn_unknown_comment_command_name);
+ Diag(T.getLocation(), diag::warn_unknown_comment_command_name)
+ << SourceRange(T.getLocation(), T.getEndLocation());
return;
}
}
diff --git a/lib/AST/CommentParser.cpp b/lib/AST/CommentParser.cpp
index cb37ec35f4d3..c1c04239f58e 100644
--- a/lib/AST/CommentParser.cpp
+++ b/lib/AST/CommentParser.cpp
@@ -40,11 +40,11 @@ class TextTokenRetokenizer {
/// A position in \c Toks.
struct Position {
- unsigned CurToken;
const char *BufferStart;
const char *BufferEnd;
const char *BufferPtr;
SourceLocation BufferStartLoc;
+ unsigned CurToken;
};
/// Current position in Toks.
diff --git a/lib/AST/CommentSema.cpp b/lib/AST/CommentSema.cpp
index f5f4f70dcbbf..d39a9b26b2a8 100644
--- a/lib/AST/CommentSema.cpp
+++ b/lib/AST/CommentSema.cpp
@@ -950,20 +950,19 @@ unsigned Sema::resolveParmVarReference(StringRef Name,
namespace {
class SimpleTypoCorrector {
+ const NamedDecl *BestDecl;
+
StringRef Typo;
const unsigned MaxEditDistance;
- const NamedDecl *BestDecl;
unsigned BestEditDistance;
unsigned BestIndex;
unsigned NextIndex;
public:
- SimpleTypoCorrector(StringRef Typo) :
- Typo(Typo), MaxEditDistance((Typo.size() + 2) / 3),
- BestDecl(nullptr), BestEditDistance(MaxEditDistance + 1),
- BestIndex(0), NextIndex(0)
- { }
+ explicit SimpleTypoCorrector(StringRef Typo)
+ : BestDecl(nullptr), Typo(Typo), MaxEditDistance((Typo.size() + 2) / 3),
+ BestEditDistance(MaxEditDistance + 1), BestIndex(0), NextIndex(0) {}
void addDecl(const NamedDecl *ND);
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index d1e8d25ea044..c3fa1c87affd 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -1395,6 +1395,10 @@ static LinkageInfo getLVForDecl(const NamedDecl *D,
return clang::LinkageComputer::getLVForDecl(D, computation);
}
+void NamedDecl::printName(raw_ostream &os) const {
+ os << Name;
+}
+
std::string NamedDecl::getQualifiedNameAsString() const {
std::string QualName;
llvm::raw_string_ostream OS(QualName);
@@ -1481,7 +1485,7 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
OS << "::";
}
- if (getDeclName())
+ if (getDeclName() || isa<DecompositionDecl>(this))
OS << *this;
else
OS << "(anonymous)";
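A DecompositionDecl has no identifier of its own, so the extra isa<> check above routes the C++17 structured-binding holder through printName instead of printing "(anonymous)". For reference:

    #include <tuple>

    int main() {
      auto [x, y] = std::make_tuple(1, 2);  // DecompositionDecl holding x, y
      return x + y;
    }
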
@@ -1922,6 +1926,9 @@ VarDecl::isThisDeclarationADefinition(ASTContext &C) const {
//
// FIXME: How do you declare (but not define) a partial specialization of
// a static data member template outside the containing class?
+ if (isThisDeclarationADemotedDefinition())
+ return DeclarationOnly;
+
if (isStaticDataMember()) {
if (isOutOfLine() &&
!(getCanonicalDecl()->isInline() &&
@@ -2246,6 +2253,56 @@ bool VarDecl::checkInitIsICE() const {
return Eval->IsICE;
}
+VarDecl *VarDecl::getTemplateInstantiationPattern() const {
+ // If it's a variable template specialization, find the template or partial
+ // specialization from which it was instantiated.
+ if (auto *VDTemplSpec = dyn_cast<VarTemplateSpecializationDecl>(this)) {
+ auto From = VDTemplSpec->getInstantiatedFrom();
+ if (auto *VTD = From.dyn_cast<VarTemplateDecl *>()) {
+ while (auto *NewVTD = VTD->getInstantiatedFromMemberTemplate()) {
+ if (NewVTD->isMemberSpecialization())
+ break;
+ VTD = NewVTD;
+ }
+ return VTD->getTemplatedDecl()->getDefinition();
+ }
+ if (auto *VTPSD =
+ From.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
+ while (auto *NewVTPSD = VTPSD->getInstantiatedFromMember()) {
+ if (NewVTPSD->isMemberSpecialization())
+ break;
+ VTPSD = NewVTPSD;
+ }
+ return VTPSD->getDefinition();
+ }
+ }
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
+ if (isTemplateInstantiation(MSInfo->getTemplateSpecializationKind())) {
+ VarDecl *VD = getInstantiatedFromStaticDataMember();
+ while (auto *NewVD = VD->getInstantiatedFromStaticDataMember())
+ VD = NewVD;
+ return VD->getDefinition();
+ }
+ }
+
+ if (VarTemplateDecl *VarTemplate = getDescribedVarTemplate()) {
+ while (VarTemplate->getInstantiatedFromMemberTemplate()) {
+ if (VarTemplate->isMemberSpecialization())
+ break;
+ VarTemplate = VarTemplate->getInstantiatedFromMemberTemplate();
+ }
+
+ assert((!VarTemplate->getTemplatedDecl() ||
+ !isTemplateInstantiation(getTemplateSpecializationKind())) &&
+ "couldn't find pattern for variable instantiation");
+
+ return VarTemplate->getTemplatedDecl();
+ }
+ return nullptr;
+}
+
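
For illustration, a minimal sketch (assuming C++14 variable templates) of what the new getTemplateInstantiationPattern() resolves:

    template <typename T> T zero = T();            // primary pattern
    template <typename T> T *zero<T *> = nullptr;  // partial specialization
    template int zero<int>;                        // explicit instantiation

    // For the specialization 'zero<int>' the walk above lands on the primary
    // template's definition; for 'zero<int *>' it lands on the partial
    // specialization's definition, chasing instantiated-from-member chains
    // until it hits a member specialization.
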
VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
return cast<VarDecl>(MSI->getInstantiatedFrom());
@@ -2592,7 +2649,7 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction() const {
return false;
const auto *FPT = getType()->castAs<FunctionProtoType>();
- if (FPT->getNumParams() == 0 || FPT->getNumParams() > 2 || FPT->isVariadic())
+ if (FPT->getNumParams() == 0 || FPT->getNumParams() > 3 || FPT->isVariadic())
return false;
// If this is a single-parameter function, it must be a replaceable global
@@ -2600,20 +2657,42 @@ bool FunctionDecl::isReplaceableGlobalAllocationFunction() const {
if (FPT->getNumParams() == 1)
return true;
- // Otherwise, we're looking for a second parameter whose type is
- // 'const std::nothrow_t &', or, in C++1y, 'std::size_t'.
- QualType Ty = FPT->getParamType(1);
+ unsigned Params = 1;
+ QualType Ty = FPT->getParamType(Params);
ASTContext &Ctx = getASTContext();
+
+ auto Consume = [&] {
+ ++Params;
+ Ty = Params < FPT->getNumParams() ? FPT->getParamType(Params) : QualType();
+ };
+
+ // In C++14, the next parameter can be a 'std::size_t' for sized delete.
+ bool IsSizedDelete = false;
if (Ctx.getLangOpts().SizedDeallocation &&
- Ctx.hasSameType(Ty, Ctx.getSizeType()))
- return true;
- if (!Ty->isReferenceType())
- return false;
- Ty = Ty->getPointeeType();
- if (Ty.getCVRQualifiers() != Qualifiers::Const)
- return false;
- const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
- return RD && isNamed(RD, "nothrow_t") && RD->isInStdNamespace();
+ (getDeclName().getCXXOverloadedOperator() == OO_Delete ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_Delete) &&
+ Ctx.hasSameType(Ty, Ctx.getSizeType())) {
+ IsSizedDelete = true;
+ Consume();
+ }
+
+ // In C++17, the next parameter can be a 'std::align_val_t' for aligned
+ // new/delete.
+ if (Ctx.getLangOpts().AlignedAllocation && !Ty.isNull() && Ty->isAlignValT())
+ Consume();
+
+ // Finally, if this is not a sized delete, the final parameter can
+ // be a 'const std::nothrow_t&'.
+ if (!IsSizedDelete && !Ty.isNull() && Ty->isReferenceType()) {
+ Ty = Ty->getPointeeType();
+ if (Ty.getCVRQualifiers() != Qualifiers::Const)
+ return false;
+ const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+ if (RD && isNamed(RD, "nothrow_t") && RD->isInStdNamespace())
+ Consume();
+ }
+
+ return Params == FPT->getNumParams();
}
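
For reference, a sketch (not from the patch) of the global deallocation signatures the relaxed check, now allowing up to three parameters, accepts, assuming -fsized-deallocation and -faligned-allocation:

    #include <cstddef>
    #include <new>
    void operator delete(void *) noexcept;                                 // usual
    void operator delete(void *, std::size_t) noexcept;                    // sized
    void operator delete(void *, std::align_val_t) noexcept;               // aligned
    void operator delete(void *, std::size_t, std::align_val_t) noexcept;  // both
    void operator delete(void *, const std::nothrow_t &) noexcept;         // nothrow
    // Per the IsSizedDelete flag above, the sized and nothrow forms do not combine.
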
LanguageLinkage FunctionDecl::getLanguageLinkage() const {
@@ -2653,9 +2732,14 @@ bool FunctionDecl::isGlobal() const {
}
bool FunctionDecl::isNoReturn() const {
- return hasAttr<NoReturnAttr>() || hasAttr<CXX11NoReturnAttr>() ||
- hasAttr<C11NoReturnAttr>() ||
- getType()->getAs<FunctionType>()->getNoReturnAttr();
+ if (hasAttr<NoReturnAttr>() || hasAttr<CXX11NoReturnAttr>() ||
+ hasAttr<C11NoReturnAttr>())
+ return true;
+
+ if (auto *FnTy = getType()->getAs<FunctionType>())
+ return FnTy->getNoReturnAttr();
+
+ return false;
}
void
@@ -2756,28 +2840,6 @@ void FunctionDecl::setParams(ASTContext &C,
}
}
-void FunctionDecl::setDeclsInPrototypeScope(ArrayRef<NamedDecl *> NewDecls) {
- assert(DeclsInPrototypeScope.empty() && "Already has prototype decls!");
-
- if (!NewDecls.empty()) {
- NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
- std::copy(NewDecls.begin(), NewDecls.end(), A);
- DeclsInPrototypeScope = llvm::makeArrayRef(A, NewDecls.size());
- // Move declarations introduced in prototype to the function context.
- for (auto I : NewDecls) {
- DeclContext *DC = I->getDeclContext();
- // Forward-declared reference to an enumeration is not added to
- // declaration scope, so skip declaration that is absent from its
- // declaration contexts.
- if (DC->containsDecl(I)) {
- DC->removeDecl(I);
- I->setDeclContext(this);
- addDecl(I);
- }
- }
- }
-}
-
/// getMinRequiredArguments - Returns the minimum number of arguments
/// needed to call this function. This may be fewer than the number of
/// function parameters, if some of the parameters have default
@@ -2964,7 +3026,8 @@ const Attr *FunctionDecl::getUnusedResultAttr() const {
/// an externally visible symbol, but "extern inline" will not create an
/// externally visible symbol.
bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
- assert(doesThisDeclarationHaveABody() && "Must have the function definition");
+ assert((doesThisDeclarationHaveABody() || willHaveBody()) &&
+ "Must be a function definition");
assert(isInlined() && "Function must be inline");
ASTContext &Context = getASTContext();
@@ -3408,6 +3471,10 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
case Builtin::BIstrlen:
return Builtin::BIstrlen;
+ case Builtin::BI__builtin_bzero:
+ case Builtin::BIbzero:
+ return Builtin::BIbzero;
+
default:
if (isExternC()) {
if (FnInfo->isStr("memset"))
@@ -3430,6 +3497,8 @@ unsigned FunctionDecl::getMemoryFunctionKind() const {
return Builtin::BIstrndup;
else if (FnInfo->isStr("strlen"))
return Builtin::BIstrlen;
+ else if (FnInfo->isStr("bzero"))
+ return Builtin::BIbzero;
}
break;
}
@@ -4281,3 +4350,18 @@ SourceRange ImportDecl::getSourceRange() const {
return SourceRange(getLocation(), getIdentifierLocs().back());
}
+
+//===----------------------------------------------------------------------===//
+// ExportDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ExportDecl::anchor() {}
+
+ExportDecl *ExportDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation ExportLoc) {
+ return new (C, DC) ExportDecl(DC, ExportLoc);
+}
+
+ExportDecl *ExportDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) ExportDecl(nullptr, SourceLocation());
+}
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index bfb7d02b2955..6111abab646e 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -28,7 +28,6 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
@@ -46,8 +45,7 @@ void Decl::updateOutOfDate(IdentifierInfo &II) const {
}
#define DECL(DERIVED, BASE) \
- static_assert(llvm::AlignOf<Decl>::Alignment >= \
- llvm::AlignOf<DERIVED##Decl>::Alignment, \
+ static_assert(alignof(Decl) >= alignof(DERIVED##Decl), \
"Alignment sufficient after objects prepended to " #DERIVED);
#define ABSTRACT_DECL(DECL)
#include "clang/AST/DeclNodes.inc"
@@ -56,7 +54,7 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Context,
unsigned ID, std::size_t Extra) {
// Allocate an extra 8 bytes worth of storage, which ensures that the
// resulting pointer will still be 8-byte aligned.
- static_assert(sizeof(unsigned) * 2 >= llvm::AlignOf<Decl>::Alignment,
+ static_assert(sizeof(unsigned) * 2 >= alignof(Decl),
"Decl won't be misaligned");
void *Start = Context.Allocate(Size + Extra + 8);
void *Result = (char*)Start + 8;
@@ -81,8 +79,7 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Ctx,
// Ensure required alignment of the resulting object by adding extra
// padding at the start if required.
size_t ExtraAlign =
- llvm::OffsetToAlignment(sizeof(Module *),
- llvm::AlignOf<Decl>::Alignment);
+ llvm::OffsetToAlignment(sizeof(Module *), alignof(Decl));
char *Buffer = reinterpret_cast<char *>(
::operator new(ExtraAlign + sizeof(Module *) + Size + Extra, Ctx));
Buffer += ExtraAlign;
@@ -112,12 +109,24 @@ const char *Decl::getDeclKindName() const {
void Decl::setInvalidDecl(bool Invalid) {
InvalidDecl = Invalid;
assert(!isa<TagDecl>(this) || !cast<TagDecl>(this)->isCompleteDefinition());
- if (Invalid && !isa<ParmVarDecl>(this)) {
+ if (!Invalid) {
+ return;
+ }
+
+ if (!isa<ParmVarDecl>(this)) {
// Defensive maneuver for ill-formed code: we're likely not to make it to
// a point where we set the access specifier, so default it to "public"
// to avoid triggering asserts elsewhere in the front end.
setAccess(AS_public);
}
+
+  // Marking a DecompositionDecl as invalid implies all the child BindingDecls
+  // are invalid too.
+ if (DecompositionDecl *DD = dyn_cast<DecompositionDecl>(this)) {
+ for (BindingDecl *Binding : DD->bindings()) {
+ Binding->setInvalidDecl();
+ }
+ }
}
const char *DeclContext::getDeclKindName() const {
@@ -378,6 +387,22 @@ bool Decl::isReferenced() const {
return false;
}
+bool Decl::isExported() const {
+ if (isModulePrivate())
+ return false;
+ // Namespaces are always exported.
+ if (isa<TranslationUnitDecl>(this) || isa<NamespaceDecl>(this))
+ return true;
+ // Otherwise, this is a strictly lexical check.
+ for (auto *DC = getLexicalDeclContext(); DC; DC = DC->getLexicalParent()) {
+ if (cast<Decl>(DC)->isModulePrivate())
+ return false;
+ if (isa<ExportDecl>(DC))
+ return true;
+ }
+ return false;
+}
+
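
A minimal illustration of the lexical rule, assuming a -fmodules-ts translation unit:

    export module M;
    export int f();    // lexically inside an ExportDecl: isExported() is true
    int g();           // no enclosing export: not exported
    export namespace N {
      int h();         // exported: the parent walk finds the enclosing ExportDecl
    }
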
bool Decl::hasDefiningAttr() const {
return hasAttr<AliasAttr>() || hasAttr<IFuncAttr>();
}
@@ -401,11 +426,12 @@ const Attr *Decl::getDefiningAttr() const {
/// diagnostics.
static AvailabilityResult CheckAvailability(ASTContext &Context,
const AvailabilityAttr *A,
- std::string *Message) {
- VersionTuple TargetMinVersion =
- Context.getTargetInfo().getPlatformMinVersion();
+ std::string *Message,
+ VersionTuple EnclosingVersion) {
+ if (EnclosingVersion.empty())
+ EnclosingVersion = Context.getTargetInfo().getPlatformMinVersion();
- if (TargetMinVersion.empty())
+ if (EnclosingVersion.empty())
return AR_Available;
// Check if this is an App Extension "platform", and if so chop off
@@ -450,7 +476,7 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
// Make sure that this declaration has already been introduced.
if (!A->getIntroduced().empty() &&
- TargetMinVersion < A->getIntroduced()) {
+ EnclosingVersion < A->getIntroduced()) {
if (Message) {
Message->clear();
llvm::raw_string_ostream Out(*Message);
@@ -464,7 +490,7 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
}
// Make sure that this declaration hasn't been obsoleted.
- if (!A->getObsoleted().empty() && TargetMinVersion >= A->getObsoleted()) {
+ if (!A->getObsoleted().empty() && EnclosingVersion >= A->getObsoleted()) {
if (Message) {
Message->clear();
llvm::raw_string_ostream Out(*Message);
@@ -478,7 +504,7 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
}
// Make sure that this declaration hasn't been deprecated.
- if (!A->getDeprecated().empty() && TargetMinVersion >= A->getDeprecated()) {
+ if (!A->getDeprecated().empty() && EnclosingVersion >= A->getDeprecated()) {
if (Message) {
Message->clear();
llvm::raw_string_ostream Out(*Message);
@@ -494,9 +520,10 @@ static AvailabilityResult CheckAvailability(ASTContext &Context,
return AR_Available;
}
-AvailabilityResult Decl::getAvailability(std::string *Message) const {
+AvailabilityResult Decl::getAvailability(std::string *Message,
+ VersionTuple EnclosingVersion) const {
if (auto *FTD = dyn_cast<FunctionTemplateDecl>(this))
- return FTD->getTemplatedDecl()->getAvailability(Message);
+ return FTD->getTemplatedDecl()->getAvailability(Message, EnclosingVersion);
AvailabilityResult Result = AR_Available;
std::string ResultMessage;
@@ -521,7 +548,7 @@ AvailabilityResult Decl::getAvailability(std::string *Message) const {
if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
- Message);
+ Message, EnclosingVersion);
if (AR == AR_Unavailable)
return AR_Unavailable;
@@ -580,8 +607,8 @@ bool Decl::isWeakImported() const {
return true;
if (const auto *Availability = dyn_cast<AvailabilityAttr>(A)) {
- if (CheckAvailability(getASTContext(), Availability,
- nullptr) == AR_NotYetIntroduced)
+ if (CheckAvailability(getASTContext(), Availability, nullptr,
+ VersionTuple()) == AR_NotYetIntroduced)
return true;
}
}
@@ -599,6 +626,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case CXXConversion:
case EnumConstant:
case Var:
+ case Binding:
case ImplicitParam:
case ParmVar:
case ObjCMethod:
@@ -623,11 +651,13 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case Typedef:
case TypeAlias:
case TypeAliasTemplate:
- case UnresolvedUsingTypename:
case TemplateTypeParm:
case ObjCTypeParam:
return IDNS_Ordinary | IDNS_Type;
+ case UnresolvedUsingTypename:
+ return IDNS_Ordinary | IDNS_Type | IDNS_Using;
+
case UsingShadow:
return 0; // we'll actually overwrite this later
@@ -635,6 +665,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
return IDNS_Ordinary | IDNS_Using;
case Using:
+ case UsingPack:
return IDNS_Using;
case ObjCProtocol:
@@ -670,6 +701,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case FriendTemplate:
case AccessSpec:
case LinkageSpec:
+ case Export:
case FileScopeAsm:
case StaticAssert:
case ObjCPropertyImpl:
@@ -679,6 +711,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case Captured:
case TranslationUnit:
case ExternCContext:
+ case Decomposition:
case UsingDirective:
case BuiltinTemplate:
@@ -954,7 +987,7 @@ bool DeclContext::isDependentContext() const {
bool DeclContext::isTransparentContext() const {
if (DeclKind == Decl::Enum)
return !cast<EnumDecl>(this)->isScoped();
- else if (DeclKind == Decl::LinkageSpec)
+ else if (DeclKind == Decl::LinkageSpec || DeclKind == Decl::Export)
return true;
return false;
@@ -974,6 +1007,18 @@ bool DeclContext::isExternCContext() const {
return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_c);
}
+const LinkageSpecDecl *DeclContext::getExternCContext() const {
+ const DeclContext *DC = this;
+ while (DC->getDeclKind() != Decl::TranslationUnit) {
+ if (DC->getDeclKind() == Decl::LinkageSpec &&
+ cast<LinkageSpecDecl>(DC)->getLanguage() ==
+ clang::LinkageSpecDecl::lang_c)
+ return cast<LinkageSpecDecl>(DC);
+ DC = DC->getLexicalParent();
+ }
+ return nullptr;
+}
+
bool DeclContext::isExternCXXContext() const {
return isLinkageSpecContext(this, clang::LinkageSpecDecl::lang_cxx);
}
@@ -993,6 +1038,7 @@ DeclContext *DeclContext::getPrimaryContext() {
case Decl::TranslationUnit:
case Decl::ExternCContext:
case Decl::LinkageSpec:
+ case Decl::Export:
case Decl::Block:
case Decl::Captured:
case Decl::OMPDeclareReduction:
@@ -1405,8 +1451,8 @@ NamedDecl *const DeclContextLookupResult::SingleElementDummyList = nullptr;
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) const {
- assert(DeclKind != Decl::LinkageSpec &&
- "Should not perform lookups into linkage specs!");
+ assert(DeclKind != Decl::LinkageSpec && DeclKind != Decl::Export &&
+ "should not perform lookups into transparent contexts");
const DeclContext *PrimaryContext = getPrimaryContext();
if (PrimaryContext != this)
@@ -1467,8 +1513,8 @@ DeclContext::lookup(DeclarationName Name) const {
DeclContext::lookup_result
DeclContext::noload_lookup(DeclarationName Name) {
- assert(DeclKind != Decl::LinkageSpec &&
- "Should not perform lookups into linkage specs!");
+ assert(DeclKind != Decl::LinkageSpec && DeclKind != Decl::Export &&
+ "should not perform lookups into transparent contexts");
DeclContext *PrimaryContext = getPrimaryContext();
if (PrimaryContext != this)
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 81f94148d6ed..a9db65a51518 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -533,6 +533,12 @@ void CXXRecordDecl::addedMember(Decl *D) {
} else if (Constructor->isMoveConstructor())
SMKind |= SMF_MoveConstructor;
}
+
+ // C++11 [dcl.init.aggr]p1: DR1518
+ // An aggregate is an array or a class with no user-provided, explicit, or
+ // inherited constructors
+ if (Constructor->isUserProvided() || Constructor->isExplicit())
+ data().Aggregate = false;
}
// Handle constructors, including those inherited from base classes.
@@ -546,20 +552,6 @@ void CXXRecordDecl::addedMember(Decl *D) {
// constructor [...]
if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor())
data().HasConstexprNonCopyMoveConstructor = true;
-
- // C++ [dcl.init.aggr]p1:
- // An aggregate is an array or a class with no user-declared
- // constructors [...].
- // C++11 [dcl.init.aggr]p1:
- // An aggregate is an array or a class with no user-provided
- // constructors [...].
- // C++11 [dcl.init.aggr]p1:
- // An aggregate is an array or a class with no user-provided
- // constructors (including those inherited from a base class) [...].
- if (getASTContext().getLangOpts().CPlusPlus11
- ? Constructor->isUserProvided()
- : !Constructor->isImplicit())
- data().Aggregate = false;
}
// Handle destructors.
@@ -739,7 +731,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
if (!Field->hasInClassInitializer() && !Field->isMutable()) {
- if (CXXRecordDecl *FieldType = Field->getType()->getAsCXXRecordDecl()) {
+ if (CXXRecordDecl *FieldType = T->getAsCXXRecordDecl()) {
if (FieldType->hasDefinition() && !FieldType->allowConstDefaultInit())
data().HasUninitializedFields = true;
} else {
@@ -989,8 +981,12 @@ void CXXRecordDecl::addedMember(Decl *D) {
if (UsingDecl *Using = dyn_cast<UsingDecl>(D)) {
if (Using->getDeclName().getNameKind() ==
- DeclarationName::CXXConstructorName)
+ DeclarationName::CXXConstructorName) {
data().HasInheritedConstructor = true;
+ // C++1z [dcl.init.aggr]p1:
+ // An aggregate is [...] a class [...] with no inherited constructors
+ data().Aggregate = false;
+ }
if (Using->getDeclName().getCXXOverloadedOperator() == OO_Equal)
data().HasInheritedAssignment = true;
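
A short sketch of the DR1518/C++1z aggregate rule the two hunks above implement:

    struct A { explicit A() = default; };  // explicit ctor: A is not an aggregate
    struct B : A { using A::A; };          // inherited ctor: B is not an aggregate
    // A a = {};  // now ill-formed: copy-list-initialization cannot select an
    //            // explicit default constructor once aggregate init is gone.
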
@@ -1107,6 +1103,12 @@ CXXRecordDecl::getGenericLambdaTemplateParameterList() const {
return nullptr;
}
+Decl *CXXRecordDecl::getLambdaContextDecl() const {
+ assert(isLambda() && "Not a lambda closure type!");
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ return getLambdaData().ContextDecl.get(Source);
+}
+
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
QualType T =
cast<CXXConversionDecl>(Conv->getUnderlyingDecl()->getAsFunction())
@@ -1571,17 +1573,35 @@ bool CXXMethodDecl::isUsualDeallocationFunction() const {
// deallocation function. [...]
if (getNumParams() == 1)
return true;
-
- // C++ [basic.stc.dynamic.deallocation]p2:
+ unsigned UsualParams = 1;
+
+ // C++ <=14 [basic.stc.dynamic.deallocation]p2:
// [...] If class T does not declare such an operator delete but does
// declare a member deallocation function named operator delete with
// exactly two parameters, the second of which has type std::size_t (18.1),
// then this function is a usual deallocation function.
+ //
+ // C++17 says a usual deallocation function is one with the signature
+ // (void* [, size_t] [, std::align_val_t] [, ...])
+ // and all such functions are usual deallocation functions. It's not clear
+ // that allowing varargs functions was intentional.
ASTContext &Context = getASTContext();
- if (getNumParams() != 2 ||
- !Context.hasSameUnqualifiedType(getParamDecl(1)->getType(),
- Context.getSizeType()))
+ if (UsualParams < getNumParams() &&
+ Context.hasSameUnqualifiedType(getParamDecl(UsualParams)->getType(),
+ Context.getSizeType()))
+ ++UsualParams;
+
+ if (UsualParams < getNumParams() &&
+ getParamDecl(UsualParams)->getType()->isAlignValT())
+ ++UsualParams;
+
+ if (UsualParams != getNumParams())
return false;
+
+ // In C++17 onwards, all potential usual deallocation functions are actual
+ // usual deallocation functions.
+ if (Context.getLangOpts().AlignedAllocation)
+ return true;
// This function is a usual deallocation function if there are no
// single-parameter deallocation functions of the same kind.
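
For example, under -faligned-allocation the following member now counts as a usual deallocation function (a sketch, not from the patch):

    #include <cstddef>
    #include <new>
    struct X {
      // Matches (void*, std::size_t, std::align_val_t): usual in C++17, so it
      // participates when a delete-expression destroys an X.
      void operator delete(void *p, std::size_t, std::align_val_t) noexcept;
    };
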
@@ -1714,7 +1734,7 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation EllipsisLoc)
: Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual),
- IsWritten(false), SourceOrderOrNumArrayIndices(0)
+ IsWritten(false), SourceOrder(0)
{
}
@@ -1725,7 +1745,7 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation R)
: Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
- IsWritten(false), SourceOrderOrNumArrayIndices(0)
+ IsWritten(false), SourceOrder(0)
{
}
@@ -1736,7 +1756,7 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation R)
: Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
- IsWritten(false), SourceOrderOrNumArrayIndices(0)
+ IsWritten(false), SourceOrder(0)
{
}
@@ -1746,38 +1766,10 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation R)
: Initializee(TInfo), MemberOrEllipsisLocation(), Init(Init),
LParenLoc(L), RParenLoc(R), IsDelegating(true), IsVirtual(false),
- IsWritten(false), SourceOrderOrNumArrayIndices(0)
+ IsWritten(false), SourceOrder(0)
{
}
-CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
- FieldDecl *Member,
- SourceLocation MemberLoc,
- SourceLocation L, Expr *Init,
- SourceLocation R,
- VarDecl **Indices,
- unsigned NumIndices)
- : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
- LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
- IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
-{
- std::uninitialized_copy(Indices, Indices + NumIndices,
- getTrailingObjects<VarDecl *>());
-}
-
-CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
- FieldDecl *Member,
- SourceLocation MemberLoc,
- SourceLocation L, Expr *Init,
- SourceLocation R,
- VarDecl **Indices,
- unsigned NumIndices) {
- void *Mem = Context.Allocate(totalSizeToAlloc<VarDecl *>(NumIndices),
- llvm::alignOf<CXXCtorInitializer>());
- return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R,
- Indices, NumIndices);
-}
-
TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
if (isBaseInitializer())
return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
@@ -2253,15 +2245,37 @@ SourceRange UsingDecl::getSourceRange() const {
return SourceRange(Begin, getNameInfo().getEndLoc());
}
+void UsingPackDecl::anchor() { }
+
+UsingPackDecl *UsingPackDecl::Create(ASTContext &C, DeclContext *DC,
+ NamedDecl *InstantiatedFrom,
+ ArrayRef<NamedDecl *> UsingDecls) {
+ size_t Extra = additionalSizeToAlloc<NamedDecl *>(UsingDecls.size());
+ return new (C, DC, Extra) UsingPackDecl(DC, InstantiatedFrom, UsingDecls);
+}
+
+UsingPackDecl *UsingPackDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumExpansions) {
+ size_t Extra = additionalSizeToAlloc<NamedDecl *>(NumExpansions);
+ auto *Result = new (C, ID, Extra) UsingPackDecl(nullptr, nullptr, None);
+ Result->NumExpansions = NumExpansions;
+ auto *Trail = Result->getTrailingObjects<NamedDecl *>();
+ for (unsigned I = 0; I != NumExpansions; ++I)
+ new (Trail + I) NamedDecl*(nullptr);
+ return Result;
+}
+
void UnresolvedUsingValueDecl::anchor() { }
UnresolvedUsingValueDecl *
UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation UsingLoc,
NestedNameSpecifierLoc QualifierLoc,
- const DeclarationNameInfo &NameInfo) {
+ const DeclarationNameInfo &NameInfo,
+ SourceLocation EllipsisLoc) {
return new (C, DC) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
- QualifierLoc, NameInfo);
+ QualifierLoc, NameInfo,
+ EllipsisLoc);
}
UnresolvedUsingValueDecl *
@@ -2269,7 +2283,8 @@ UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) UnresolvedUsingValueDecl(nullptr, QualType(),
SourceLocation(),
NestedNameSpecifierLoc(),
- DeclarationNameInfo());
+ DeclarationNameInfo(),
+ SourceLocation());
}
SourceRange UnresolvedUsingValueDecl::getSourceRange() const {
@@ -2286,17 +2301,18 @@ UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation TypenameLoc,
NestedNameSpecifierLoc QualifierLoc,
SourceLocation TargetNameLoc,
- DeclarationName TargetName) {
+ DeclarationName TargetName,
+ SourceLocation EllipsisLoc) {
return new (C, DC) UnresolvedUsingTypenameDecl(
DC, UsingLoc, TypenameLoc, QualifierLoc, TargetNameLoc,
- TargetName.getAsIdentifierInfo());
+ TargetName.getAsIdentifierInfo(), EllipsisLoc);
}
UnresolvedUsingTypenameDecl *
UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) UnresolvedUsingTypenameDecl(
nullptr, SourceLocation(), SourceLocation(), NestedNameSpecifierLoc(),
- SourceLocation(), nullptr);
+ SourceLocation(), nullptr, SourceLocation());
}
void StaticAssertDecl::anchor() { }
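
The new EllipsisLoc parameters model C++1z pack expansions in using-declarations; a minimal sketch:

    template <typename... Ts>
    struct Overloaded : Ts... {
      using Ts::operator()...;  // one UsingPackDecl covering the expansion;
                                // each dependent element is an
                                // UnresolvedUsingValueDecl with an EllipsisLoc
    };
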
@@ -2317,6 +2333,70 @@ StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
nullptr, SourceLocation(), false);
}
+void BindingDecl::anchor() {}
+
+BindingDecl *BindingDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdLoc, IdentifierInfo *Id) {
+ return new (C, DC) BindingDecl(DC, IdLoc, Id);
+}
+
+BindingDecl *BindingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) BindingDecl(nullptr, SourceLocation(), nullptr);
+}
+
+VarDecl *BindingDecl::getHoldingVar() const {
+ Expr *B = getBinding();
+ if (!B)
+ return nullptr;
+ auto *DRE = dyn_cast<DeclRefExpr>(B->IgnoreImplicit());
+ if (!DRE)
+ return nullptr;
+
+  // Use cast<>, not dyn_cast<>: the referenced declaration must be a VarDecl,
+  // and a failed dyn_cast would make the assert below dereference null.
+  auto *VD = cast<VarDecl>(DRE->getDecl());
+  assert(VD->isImplicit() && "holding var for binding decl not implicit");
+ return VD;
+}
+
+void DecompositionDecl::anchor() {}
+
+DecompositionDecl *DecompositionDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation LSquareLoc,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC,
+ ArrayRef<BindingDecl *> Bindings) {
+ size_t Extra = additionalSizeToAlloc<BindingDecl *>(Bindings.size());
+ return new (C, DC, Extra)
+ DecompositionDecl(C, DC, StartLoc, LSquareLoc, T, TInfo, SC, Bindings);
+}
+
+DecompositionDecl *DecompositionDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID,
+ unsigned NumBindings) {
+ size_t Extra = additionalSizeToAlloc<BindingDecl *>(NumBindings);
+ auto *Result = new (C, ID, Extra)
+ DecompositionDecl(C, nullptr, SourceLocation(), SourceLocation(),
+ QualType(), nullptr, StorageClass(), None);
+ // Set up and clean out the bindings array.
+ Result->NumBindings = NumBindings;
+ auto *Trail = Result->getTrailingObjects<BindingDecl *>();
+ for (unsigned I = 0; I != NumBindings; ++I)
+ new (Trail + I) BindingDecl*(nullptr);
+ return Result;
+}
+
+void DecompositionDecl::printName(llvm::raw_ostream &os) const {
+ os << '[';
+ bool Comma = false;
+ for (auto *B : bindings()) {
+ if (Comma)
+ os << ", ";
+ B->printName(os);
+ Comma = true;
+ }
+ os << ']';
+}
+
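
A sketch of what the new printName produces for a C++1z structured binding:

    #include <tuple>
    int main() {
      auto [a, b] = std::tuple<int, float>(1, 2.0f);
      // The DecompositionDecl has no DeclarationName; diagnostics that print
      // it now show "[a, b]" instead of "(anonymous)".
      return a + static_cast<int>(b);
    }
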
MSPropertyDecl *MSPropertyDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName N,
QualType T, TypeSourceInfo *TInfo,
diff --git a/lib/AST/DeclGroup.cpp b/lib/AST/DeclGroup.cpp
index f162e6d40c48..2f95e1f1c345 100644
--- a/lib/AST/DeclGroup.cpp
+++ b/lib/AST/DeclGroup.cpp
@@ -14,13 +14,12 @@
#include "clang/AST/DeclGroup.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
-#include "llvm/Support/Allocator.h"
using namespace clang;
DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
assert(NumDecls > 1 && "Invalid DeclGroup");
unsigned Size = totalSizeToAlloc<Decl *>(NumDecls);
- void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
+ void *Mem = C.Allocate(Size, alignof(DeclGroup));
new (Mem) DeclGroup(NumDecls, Decls);
return static_cast<DeclGroup*>(Mem);
}
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index d2701211beae..60d05f682e6e 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -800,8 +800,7 @@ void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
if (Params.empty() && SelLocs.empty())
return;
- static_assert(llvm::AlignOf<ParmVarDecl *>::Alignment >=
- llvm::AlignOf<SourceLocation>::Alignment,
+ static_assert(alignof(ParmVarDecl *) >= alignof(SourceLocation),
"Alignment not sufficient for SourceLocation");
unsigned Size = sizeof(ParmVarDecl *) * NumParams +
@@ -871,6 +870,12 @@ ObjCMethodDecl *ObjCMethodDecl::getNextRedeclarationImpl() {
}
}
+ // Ensure that the discovered method redeclaration has a valid declaration
+ // context. Used to prevent infinite loops when iterating redeclarations in
+ // a partially invalid AST.
+ if (Redecl && cast<Decl>(Redecl->getDeclContext())->isInvalidDecl())
+ Redecl = nullptr;
+
if (!Redecl && isRedeclaration()) {
// This is the last redeclaration, go back to the first method.
return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
@@ -897,9 +902,13 @@ ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
return MD;
}
- if (isRedeclaration())
- return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
- isInstanceMethod());
+ if (isRedeclaration()) {
+    // It is possible that we have not finished deserializing the ObjCMethod yet.
+ ObjCMethodDecl *MD =
+ cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+ return MD ? MD : this;
+ }
return this;
}
@@ -1320,8 +1329,12 @@ ObjCTypeParamDecl *ObjCTypeParamDecl::Create(ASTContext &ctx, DeclContext *dc,
IdentifierInfo *name,
SourceLocation colonLoc,
TypeSourceInfo *boundInfo) {
- return new (ctx, dc) ObjCTypeParamDecl(ctx, dc, variance, varianceLoc, index,
- nameLoc, name, colonLoc, boundInfo);
+ auto *TPDecl =
+ new (ctx, dc) ObjCTypeParamDecl(ctx, dc, variance, varianceLoc, index,
+ nameLoc, name, colonLoc, boundInfo);
+ QualType TPType = ctx.getObjCTypeParamType(TPDecl, {});
+ TPDecl->setTypeForDecl(TPType.getTypePtr());
+ return TPDecl;
}
ObjCTypeParamDecl *ObjCTypeParamDecl::CreateDeserialized(ASTContext &ctx,
@@ -1366,7 +1379,7 @@ ObjCTypeParamList *ObjCTypeParamList::create(
SourceLocation rAngleLoc) {
void *mem =
ctx.Allocate(totalSizeToAlloc<ObjCTypeParamDecl *>(typeParams.size()),
- llvm::alignOf<ObjCTypeParamList>());
+ alignof(ObjCTypeParamList));
return new (mem) ObjCTypeParamList(lAngleLoc, typeParams, rAngleLoc);
}
diff --git a/lib/AST/DeclOpenMP.cpp b/lib/AST/DeclOpenMP.cpp
index 5b06ce0778a7..95e44acca032 100644
--- a/lib/AST/DeclOpenMP.cpp
+++ b/lib/AST/DeclOpenMP.cpp
@@ -90,13 +90,18 @@ OMPDeclareReductionDecl::getPrevDeclInScope() const {
void OMPCapturedExprDecl::anchor() {}
OMPCapturedExprDecl *OMPCapturedExprDecl::Create(ASTContext &C, DeclContext *DC,
- IdentifierInfo *Id,
- QualType T) {
- return new (C, DC) OMPCapturedExprDecl(C, DC, Id, T);
+ IdentifierInfo *Id, QualType T,
+ SourceLocation StartLoc) {
+ return new (C, DC) OMPCapturedExprDecl(C, DC, Id, T, StartLoc);
}
OMPCapturedExprDecl *OMPCapturedExprDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
- return new (C, ID) OMPCapturedExprDecl(C, nullptr, nullptr, QualType());
+ return new (C, ID)
+ OMPCapturedExprDecl(C, nullptr, nullptr, QualType(), SourceLocation());
}
+SourceRange OMPCapturedExprDecl::getSourceRange() const {
+ assert(hasInit());
+ return SourceRange(getInit()->getLocStart(), getInit()->getLocEnd());
+}
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 7e786990becb..b8ebe1c568c7 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -78,6 +78,10 @@ namespace {
void VisitTemplateDecl(const TemplateDecl *D);
void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
+ void VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D);
void VisitObjCMethodDecl(ObjCMethodDecl *D);
void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
@@ -95,8 +99,9 @@ namespace {
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
- void PrintTemplateParameters(const TemplateParameterList *Params,
- const TemplateArgumentList *Args = nullptr);
+ void printTemplateParameters(const TemplateParameterList *Params);
+ void printTemplateArguments(const TemplateArgumentList &Args,
+ const TemplateParameterList *Params = nullptr);
void prettyPrintAttributes(Decl *D);
void prettyPrintPragmas(Decl *D);
void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
@@ -290,6 +295,13 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
if (D->isImplicit())
continue;
+  // Don't print implicit specializations, as they are printed when visiting
+  // the corresponding templates.
+ if (auto FD = dyn_cast<FunctionDecl>(*D))
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation &&
+ !isa<ClassTemplateSpecializationDecl>(DC))
+ continue;
+
// The next bits of code handles stuff like "struct {int x;} a,b"; we're
// forced to merge the declarations because there's no other way to
// refer to the struct in question. This limited merging is safe without
@@ -337,12 +349,19 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
const char *Terminator = nullptr;
if (isa<OMPThreadPrivateDecl>(*D) || isa<OMPDeclareReductionDecl>(*D))
Terminator = nullptr;
- else if (isa<FunctionDecl>(*D) &&
- cast<FunctionDecl>(*D)->isThisDeclarationADefinition())
+ else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->hasBody())
Terminator = nullptr;
- else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody())
- Terminator = nullptr;
- else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
+ else if (auto FD = dyn_cast<FunctionDecl>(*D)) {
+ if (FD->isThisDeclarationADefinition())
+ Terminator = nullptr;
+ else
+ Terminator = ";";
+ } else if (auto TD = dyn_cast<FunctionTemplateDecl>(*D)) {
+ if (TD->getTemplatedDecl()->isThisDeclarationADefinition())
+ Terminator = nullptr;
+ else
+ Terminator = ";";
+ } else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
isa<ObjCImplementationDecl>(*D) ||
isa<ObjCInterfaceDecl>(*D) ||
isa<ObjCProtocolDecl>(*D) ||
@@ -359,7 +378,14 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
if (Terminator)
Out << Terminator;
- Out << "\n";
+ if (!Policy.TerseOutput &&
+ ((isa<FunctionDecl>(*D) &&
+ cast<FunctionDecl>(*D)->doesThisDeclarationHaveABody()) ||
+ (isa<FunctionTemplateDecl>(*D) &&
+ cast<FunctionTemplateDecl>(*D)->getTemplatedDecl()->doesThisDeclarationHaveABody())))
+ ; // StmtPrinter already added '\n' after CompoundStmt.
+ else
+ Out << "\n";
// Declare target attribute is special one, natural spelling for the pragma
// assumes "ending" construct so print it here.
@@ -408,7 +434,7 @@ void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
}
Out << *D;
- if (D->isFixed())
+ if (D->isFixed() && D->getASTContext().getLangOpts().CPlusPlus11)
Out << " : " << D->getIntegerType().stream(Policy);
if (D->isCompleteDefinition()) {
@@ -449,6 +475,9 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
!D->isFunctionTemplateSpecialization())
prettyPrintPragmas(D);
+ if (D->isFunctionTemplateSpecialization())
+ Out << "template<> ";
+
CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
if (!Policy.SuppressSpecifiers) {
@@ -473,6 +502,11 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
PrintingPolicy SubPolicy(Policy);
SubPolicy.SuppressSpecifiers = false;
std::string Proto = D->getNameInfo().getAsString();
+ if (const TemplateArgumentList *TArgs = D->getTemplateSpecializationArgs()) {
+ llvm::raw_string_ostream POut(Proto);
+ DeclPrinter TArgPrinter(POut, SubPolicy, Indentation);
+ TArgPrinter.printTemplateArguments(*TArgs);
+ }
QualType Ty = D->getType();
while (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
@@ -636,25 +670,29 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Out << " = delete";
else if (D->isExplicitlyDefaulted())
Out << " = default";
- else if (D->doesThisDeclarationHaveABody() && !Policy.TerseOutput) {
- if (!D->hasPrototype() && D->getNumParams()) {
- // This is a K&R function definition, so we need to print the
- // parameters.
- Out << '\n';
- DeclPrinter ParamPrinter(Out, SubPolicy, Indentation);
- Indentation += Policy.Indentation;
- for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
- Indent();
- ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
- Out << ";\n";
- }
- Indentation -= Policy.Indentation;
- } else
- Out << ' ';
+ else if (D->doesThisDeclarationHaveABody()) {
+ if (!Policy.TerseOutput) {
+ if (!D->hasPrototype() && D->getNumParams()) {
+ // This is a K&R function definition, so we need to print the
+ // parameters.
+ Out << '\n';
+ DeclPrinter ParamPrinter(Out, SubPolicy, Indentation);
+ Indentation += Policy.Indentation;
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ Indent();
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ Out << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ } else
+ Out << ' ';
- if (D->getBody())
- D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation);
- Out << '\n';
+ if (D->getBody())
+ D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation);
+ } else {
+ if (isa<CXXConstructorDecl>(*D))
+ Out << " {}";
+ }
}
}
@@ -662,7 +700,7 @@ void DeclPrinter::VisitFriendDecl(FriendDecl *D) {
if (TypeSourceInfo *TSI = D->getFriendType()) {
unsigned NumTPLists = D->getFriendTypeNumTemplateParameterLists();
for (unsigned i = 0; i < NumTPLists; ++i)
- PrintTemplateParameters(D->getFriendTypeTemplateParameterList(i));
+ printTemplateParameters(D->getFriendTypeTemplateParameterList(i));
Out << "friend ";
Out << " " << TSI->getType().getAsString(Policy);
}
@@ -839,9 +877,15 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
prettyPrintAttributes(D);
- if (D->getIdentifier())
+ if (D->getIdentifier()) {
Out << ' ' << *D;
+ if (auto S = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
+ printTemplateArguments(S->getTemplateArgs(), S->getTemplateParameters());
+ else if (auto S = dyn_cast<ClassTemplateSpecializationDecl>(D))
+ printTemplateArguments(S->getTemplateArgs());
+ }
+
if (D->isCompleteDefinition()) {
// Print the base classes
if (D->getNumBases()) {
@@ -868,9 +912,13 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
// Print the class definition
// FIXME: Doesn't print access specifiers, e.g., "public:"
- Out << " {\n";
- VisitDeclContext(D);
- Indent() << "}";
+ if (Policy.TerseOutput) {
+ Out << " {}";
+ } else {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
}
}
@@ -893,10 +941,8 @@ void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
Visit(*D->decls_begin());
}
-void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
- const TemplateArgumentList *Args) {
+void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params) {
assert(Params);
- assert(!Args || Params->size() == Args->size());
Out << "template <";
@@ -905,8 +951,7 @@ void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
Out << ", ";
const Decl *Param = Params->getParam(i);
- if (const TemplateTypeParmDecl *TTP =
- dyn_cast<TemplateTypeParmDecl>(Param)) {
+ if (auto TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
if (TTP->wasDeclaredWithTypename())
Out << "typename ";
@@ -918,30 +963,22 @@ void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
Out << *TTP;
- if (Args) {
- Out << " = ";
- Args->get(i).print(Policy, Out);
- } else if (TTP->hasDefaultArgument()) {
+ if (TTP->hasDefaultArgument()) {
Out << " = ";
Out << TTP->getDefaultArgument().getAsString(Policy);
};
- } else if (const NonTypeTemplateParmDecl *NTTP =
- dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ } else if (auto NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
StringRef Name;
if (IdentifierInfo *II = NTTP->getIdentifier())
Name = II->getName();
printDeclType(NTTP->getType(), Name, NTTP->isParameterPack());
- if (Args) {
- Out << " = ";
- Args->get(i).print(Policy, Out);
- } else if (NTTP->hasDefaultArgument()) {
+ if (NTTP->hasDefaultArgument()) {
Out << " = ";
NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy,
Indentation);
}
- } else if (const TemplateTemplateParmDecl *TTPD =
- dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ } else if (auto TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
VisitTemplateDecl(TTPD);
// FIXME: print the default argument, if present.
}
@@ -950,8 +987,46 @@ void DeclPrinter::PrintTemplateParameters(const TemplateParameterList *Params,
Out << "> ";
}
+void DeclPrinter::printTemplateArguments(const TemplateArgumentList &Args,
+ const TemplateParameterList *Params) {
+ Out << "<";
+ for (size_t I = 0, E = Args.size(); I < E; ++I) {
+ const TemplateArgument &A = Args[I];
+ if (I)
+ Out << ", ";
+ if (Params) {
+ if (A.getKind() == TemplateArgument::Type)
+ if (auto T = A.getAsType()->getAs<TemplateTypeParmType>()) {
+ auto P = cast<TemplateTypeParmDecl>(Params->getParam(T->getIndex()));
+ Out << *P;
+ continue;
+ }
+ if (A.getKind() == TemplateArgument::Template) {
+ if (auto T = A.getAsTemplate().getAsTemplateDecl())
+ if (auto TD = dyn_cast<TemplateTemplateParmDecl>(T)) {
+ auto P = cast<TemplateTemplateParmDecl>(
+ Params->getParam(TD->getIndex()));
+ Out << *P;
+ continue;
+ }
+ }
+ if (A.getKind() == TemplateArgument::Expression) {
+ if (auto E = dyn_cast<DeclRefExpr>(A.getAsExpr()))
+ if (auto N = dyn_cast<NonTypeTemplateParmDecl>(E->getDecl())) {
+ auto P = cast<NonTypeTemplateParmDecl>(
+ Params->getParam(N->getIndex()));
+ Out << *P;
+ continue;
+ }
+ }
+ }
+ A.print(Policy, Out);
+ }
+ Out << ">";
+}
+
void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
- PrintTemplateParameters(D->getTemplateParameters());
+ printTemplateParameters(D->getTemplateParameters());
if (const TemplateTemplateParmDecl *TTP =
dyn_cast<TemplateTemplateParmDecl>(D)) {
@@ -965,30 +1040,49 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
}
void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ prettyPrintPragmas(D->getTemplatedDecl());
+ VisitRedeclarableTemplateDecl(D);
+
if (PrintInstantiation) {
- TemplateParameterList *Params = D->getTemplateParameters();
- for (auto *I : D->specializations()) {
- prettyPrintPragmas(I);
- PrintTemplateParameters(Params, I->getTemplateSpecializationArgs());
- Visit(I);
- }
+ FunctionDecl *PrevDecl = D->getTemplatedDecl();
+ const FunctionDecl *Def;
+ if (PrevDecl->isDefined(Def) && Def != PrevDecl)
+ return;
+ for (auto *I : D->specializations())
+ if (I->getTemplateSpecializationKind() == TSK_ImplicitInstantiation) {
+ if (!PrevDecl->isThisDeclarationADefinition())
+ Out << ";\n";
+ Indent();
+ prettyPrintPragmas(I);
+ Visit(I);
+ }
}
-
- prettyPrintPragmas(D->getTemplatedDecl());
- return VisitRedeclarableTemplateDecl(D);
}
void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+
if (PrintInstantiation) {
- TemplateParameterList *Params = D->getTemplateParameters();
- for (auto *I : D->specializations()) {
- PrintTemplateParameters(Params, &I->getTemplateArgs());
- Visit(I);
- Out << '\n';
- }
+ for (auto *I : D->specializations())
+ if (I->getSpecializationKind() == TSK_ImplicitInstantiation) {
+ if (D->isThisDeclarationADefinition())
+ Out << ";";
+ Out << "\n";
+ Visit(I);
+ }
}
+}
+
+void DeclPrinter::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ Out << "template<> ";
+ VisitCXXRecordDecl(D);
+}
- return VisitRedeclarableTemplateDecl(D);
+void DeclPrinter::VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D) {
+ printTemplateParameters(D->getTemplateParameters());
+ VisitCXXRecordDecl(D);
}
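
Roughly, the two new visitors print specializations in their source form (illustrative output, not from the patch):

    template <typename T> struct S {};       // primary template
    template <typename U> struct S<U *> {};  // printed with its own parameter
                                             // list, via printTemplateParameters
    template <> struct S<int> {};            // printed as: template<> struct S<int>
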
//----------------------------------------------------------------------------
@@ -1346,6 +1440,17 @@ void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
if (D->hasTypename())
Out << "typename ";
D->getQualifier()->print(Out, Policy);
+
+ // Use the correct record name when the using declaration is used for
+ // inheriting constructors.
+ for (const auto *Shadow : D->shadows()) {
+ if (const auto *ConstructorShadow =
+ dyn_cast<ConstructorUsingShadowDecl>(Shadow)) {
+ assert(Shadow->getDeclContext() == ConstructorShadow->getDeclContext());
+ Out << *ConstructorShadow->getNominatedBaseClass();
+ return;
+ }
+ }
Out << *D;
}
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index 37943cdd5b7b..8643cbfcd960 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -31,10 +31,11 @@ using namespace clang;
TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
- SourceLocation RAngleLoc)
+ SourceLocation RAngleLoc,
+ Expr *RequiresClause)
: TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
- NumParams(Params.size()), ContainsUnexpandedParameterPack(false) {
- assert(this->NumParams == NumParams && "Too many template parameters");
+ NumParams(Params.size()), ContainsUnexpandedParameterPack(false),
+ HasRequiresClause(static_cast<bool>(RequiresClause)) {
for (unsigned Idx = 0; Idx < NumParams; ++Idx) {
NamedDecl *P = Params[Idx];
begin()[Idx] = P;
@@ -52,15 +53,21 @@ TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
// template parameter list does too.
}
}
+ if (RequiresClause) {
+ *getTrailingObjects<Expr *>() = RequiresClause;
+ }
}
-TemplateParameterList *TemplateParameterList::Create(
- const ASTContext &C, SourceLocation TemplateLoc, SourceLocation LAngleLoc,
- ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc) {
- void *Mem = C.Allocate(totalSizeToAlloc<NamedDecl *>(Params.size()),
- llvm::alignOf<TemplateParameterList>());
+TemplateParameterList *
+TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ArrayRef<NamedDecl *> Params,
+ SourceLocation RAngleLoc, Expr *RequiresClause) {
+ void *Mem = C.Allocate(totalSizeToAlloc<NamedDecl *, Expr *>(
+ Params.size(), RequiresClause ? 1u : 0u),
+ alignof(TemplateParameterList));
return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
- RAngleLoc);
+ RAngleLoc, RequiresClause);
}
unsigned TemplateParameterList::getMinRequiredArguments() const {
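
The new trailing Expr * stores a Concepts TS requires-clause attached to the parameter list; a hedged sketch (assuming Concepts TS-style support):

    template <typename T> requires (sizeof(T) == 4)  // the RequiresClause expr
    void f(T);
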
@@ -197,44 +204,6 @@ void RedeclarableTemplateDecl::addSpecializationImpl(
SETraits::getDecl(Entry));
}
-/// \brief Generate the injected template arguments for the given template
-/// parameter list, e.g., for the injected-class-name of a class template.
-static void GenerateInjectedTemplateArgs(ASTContext &Context,
- TemplateParameterList *Params,
- TemplateArgument *Args) {
- for (NamedDecl *Param : *Params) {
- TemplateArgument Arg;
- if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
- QualType ArgType = Context.getTypeDeclType(TTP);
- if (TTP->isParameterPack())
- ArgType = Context.getPackExpansionType(ArgType, None);
-
- Arg = TemplateArgument(ArgType);
- } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- Expr *E = new (Context) DeclRefExpr(NTTP, /*enclosing*/ false,
- NTTP->getType().getNonLValueExprType(Context),
- Expr::getValueKindForType(NTTP->getType()),
- NTTP->getLocation());
-
- if (NTTP->isParameterPack())
- E = new (Context) PackExpansionExpr(Context.DependentTy, E,
- NTTP->getLocation(), None);
- Arg = TemplateArgument(E);
- } else {
- auto *TTP = cast<TemplateTemplateParmDecl>(Param);
- if (TTP->isParameterPack())
- Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
- else
- Arg = TemplateArgument(TemplateName(TTP));
- }
-
- if (Param->isTemplateParameterPack())
- Arg = TemplateArgument::CreatePackCopy(Context, Arg);
-
- *Args++ = Arg;
- }
-}
-
//===----------------------------------------------------------------------===//
// FunctionTemplateDecl Implementation
//===----------------------------------------------------------------------===//
@@ -303,10 +272,13 @@ ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() {
TemplateParameterList *Params = getTemplateParameters();
Common *CommonPtr = getCommonPtr();
if (!CommonPtr->InjectedArgs) {
- CommonPtr->InjectedArgs
- = new (getASTContext()) TemplateArgument[Params->size()];
- GenerateInjectedTemplateArgs(getASTContext(), Params,
- CommonPtr->InjectedArgs);
+ auto &Context = getASTContext();
+ SmallVector<TemplateArgument, 16> TemplateArgs;
+ Context.getInjectedTemplateArgs(Params, TemplateArgs);
+ CommonPtr->InjectedArgs =
+ new (Context) TemplateArgument[TemplateArgs.size()];
+ std::copy(TemplateArgs.begin(), TemplateArgs.end(),
+ CommonPtr->InjectedArgs);
}
return llvm::makeArrayRef(CommonPtr->InjectedArgs, Params->size());
@@ -457,8 +429,7 @@ ClassTemplateDecl::getInjectedClassNameSpecialization() {
ASTContext &Context = getASTContext();
TemplateParameterList *Params = getTemplateParameters();
SmallVector<TemplateArgument, 16> TemplateArgs;
- TemplateArgs.resize(Params->size());
- GenerateInjectedTemplateArgs(getASTContext(), Params, TemplateArgs.data());
+ Context.getInjectedTemplateArgs(Params, TemplateArgs);
CommonPtr->InjectedClassNameType
= Context.getTemplateSpecializationType(TemplateName(this),
TemplateArgs);
@@ -754,9 +725,16 @@ void ClassTemplateSpecializationDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
- const TemplateArgumentList &TemplateArgs = getTemplateArgs();
- TemplateSpecializationType::PrintTemplateArgumentList(
- OS, TemplateArgs.asArray(), Policy);
+ auto *PS = dyn_cast<ClassTemplatePartialSpecializationDecl>(this);
+ if (const ASTTemplateArgumentListInfo *ArgsAsWritten =
+ PS ? PS->getTemplateArgsAsWritten() : nullptr) {
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, ArgsAsWritten->arguments(), Policy);
+ } else {
+ const TemplateArgumentList &TemplateArgs = getTemplateArgs();
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, TemplateArgs.asArray(), Policy);
+ }
}
ClassTemplateDecl *
@@ -1086,9 +1064,16 @@ void VarTemplateSpecializationDecl::getNameForDiagnostic(
raw_ostream &OS, const PrintingPolicy &Policy, bool Qualified) const {
NamedDecl::getNameForDiagnostic(OS, Policy, Qualified);
- const TemplateArgumentList &TemplateArgs = getTemplateArgs();
- TemplateSpecializationType::PrintTemplateArgumentList(
- OS, TemplateArgs.asArray(), Policy);
+ auto *PS = dyn_cast<VarTemplatePartialSpecializationDecl>(this);
+ if (const ASTTemplateArgumentListInfo *ArgsAsWritten =
+ PS ? PS->getTemplateArgsAsWritten() : nullptr) {
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, ArgsAsWritten->arguments(), Policy);
+ } else {
+ const TemplateArgumentList &TemplateArgs = getTemplateArgs();
+ TemplateSpecializationType::PrintTemplateArgumentList(
+ OS, TemplateArgs.asArray(), Policy);
+ }
}
VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const {
@@ -1169,7 +1154,7 @@ createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) {
// <typename T, T ...Ints>
NamedDecl *P[2] = {T, N};
auto *TPL = TemplateParameterList::Create(
- C, SourceLocation(), SourceLocation(), P, SourceLocation());
+ C, SourceLocation(), SourceLocation(), P, SourceLocation(), nullptr);
// template <typename T, ...Ints> class IntSeq
auto *TemplateTemplateParm = TemplateTemplateParmDecl::Create(
@@ -1194,7 +1179,7 @@ createMakeIntegerSeqParameterList(const ASTContext &C, DeclContext *DC) {
// template <template <typename T, T ...Ints> class IntSeq, typename T, T N>
return TemplateParameterList::Create(C, SourceLocation(), SourceLocation(),
- Params, SourceLocation());
+ Params, SourceLocation(), nullptr);
}
static TemplateParameterList *
@@ -1215,7 +1200,7 @@ createTypePackElementParameterList(const ASTContext &C, DeclContext *DC) {
NamedDecl *Params[] = {Index, Ts};
return TemplateParameterList::Create(C, SourceLocation(), SourceLocation(),
llvm::makeArrayRef(Params),
- SourceLocation());
+ SourceLocation(), nullptr);
}
static TemplateParameterList *createBuiltinTemplateParameterList(
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index 2a988e1d22d0..52791e51d2dc 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -11,14 +11,13 @@
// classes.
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/DeclarationName.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclarationName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/IdentifierTable.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -96,12 +95,18 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
case DeclarationName::ObjCMultiArgSelector: {
Selector LHSSelector = LHS.getObjCSelector();
Selector RHSSelector = RHS.getObjCSelector();
+ // getNumArgs for ZeroArgSelector returns 0, but we still need to compare.
+ if (LHS.getNameKind() == DeclarationName::ObjCZeroArgSelector &&
+ RHS.getNameKind() == DeclarationName::ObjCZeroArgSelector) {
+ return LHSSelector.getAsIdentifierInfo()->getName().compare(
+ RHSSelector.getAsIdentifierInfo()->getName());
+ }
unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
switch (LHSSelector.getNameForSlot(I).compare(
RHSSelector.getNameForSlot(I))) {
- case -1: return true;
- case 1: return false;
+ case -1: return -1;
+ case 1: return 1;
default: break;
}
}
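
The -1/1 change matters because compare() is a three-way comparator: the old 'return true'/'return false' converted to 1/0, collapsing "less than" into "greater than"/"equal". A self-contained sketch of the contract:

    #include <cstring>
    // A per-slot comparison must forward -1/0/1, never booleans.
    int compareSlots(const char *L, const char *R) {
      int C = std::strcmp(L, R);
      return C < 0 ? -1 : C > 0 ? 1 : 0;
    }
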
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 091e8787d8b6..93f3ad5f2bdd 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
@@ -36,9 +35,33 @@
#include <cstring>
using namespace clang;
-const CXXRecordDecl *Expr::getBestDynamicClassType() const {
- const Expr *E = ignoreParenBaseCasts();
+const Expr *Expr::getBestDynamicClassTypeExpr() const {
+ const Expr *E = this;
+ while (true) {
+ E = E->ignoreParenBaseCasts();
+
+ // Follow the RHS of a comma operator.
+ if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ E = BO->getRHS();
+ continue;
+ }
+ }
+
+ // Step into initializer for materialized temporaries.
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = MTE->GetTemporaryExpr();
+ continue;
+ }
+
+ break;
+ }
+
+ return E;
+}
+
+const CXXRecordDecl *Expr::getBestDynamicClassType() const {
+ const Expr *E = getBestDynamicClassTypeExpr();
QualType DerivedType = E->getType();
if (const PointerType *PTy = DerivedType->getAs<PointerType>())
DerivedType = PTy->getPointeeType();
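
A sketch of an expression form the new walk sees through (assuming the usual devirtualization use of getBestDynamicClassType()):

    struct Base { virtual int f() { return 0; } };
    struct Derived final : Base { int f() override { return 1; } };
    int call(Derived d, int i) {
      // The comma operator's value comes from its RHS, so the dynamic type
      // here resolves to Derived and the call can be devirtualized; the same
      // walk also steps through materialized temporaries.
      return ((void)i, d).f();
    }
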
@@ -403,7 +426,7 @@ DeclRefExpr *DeclRefExpr::Create(const ASTContext &Context,
HasTemplateKWAndArgsInfo ? 1 : 0,
TemplateArgs ? TemplateArgs->size() : 0);
- void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ void *Mem = Context.Allocate(Size, alignof(DeclRefExpr));
return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
RefersToEnclosingVariableOrCapture,
NameInfo, FoundD, TemplateArgs, T, VK);
@@ -420,7 +443,7 @@ DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasQualifier ? 1 : 0, HasFoundDecl ? 1 : 0, HasTemplateKWAndArgsInfo,
NumTemplateArgs);
- void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ void *Mem = Context.Allocate(Size, alignof(DeclRefExpr));
return new (Mem) DeclRefExpr(EmptyShell());
}
@@ -495,20 +518,21 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
}
return "";
}
- if (auto *BD = dyn_cast<BlockDecl>(CurrentDecl)) {
- std::unique_ptr<MangleContext> MC;
- MC.reset(Context.createMangleContext());
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
+ if (isa<BlockDecl>(CurrentDecl)) {
+    // For blocks we only emit something if the block is enclosed in a
+    // function. For a top-level block we'd like to include the name of the
+    // variable, but we don't have it at this point.
auto DC = CurrentDecl->getDeclContext();
if (DC->isFileContext())
- MC->mangleGlobalBlock(BD, /*ID*/ nullptr, Out);
- else if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
- MC->mangleCtorBlock(CD, /*CT*/ Ctor_Complete, BD, Out);
- else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
- MC->mangleDtorBlock(DD, /*DT*/ Dtor_Complete, BD, Out);
- else
- MC->mangleBlock(DC, BD, Out);
+ return "";
+
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ if (auto *DCBlock = dyn_cast<BlockDecl>(DC))
+ // For nested blocks, propagate up to the parent.
+ Out << ComputeName(IT, DCBlock);
+ else if (auto *DCDecl = dyn_cast<Decl>(DC))
+ Out << ComputeName(IT, DCDecl) << "_block_invoke";
return Out.str();
}
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
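
With this change, a block nested in a function names itself after its enclosing function, and a truly top-level block yields an empty string instead of a mangled name. A minimal sketch (blocks are a clang extension; build with -fblocks plus a blocks runtime where needed):

#include <stdio.h>

void outer(void) {
  // Prints "outer_block_invoke" under the new scheme.
  void (^blk)(void) = ^{ printf("%s\n", __FUNCTION__); };
  blk();
}

int main(void) { outer(); }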
@@ -538,12 +562,14 @@ std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
FT = dyn_cast<FunctionProtoType>(AFT);
if (IT == FuncSig) {
+ assert(FT && "We must have a written prototype in this case.");
switch (FT->getCallConv()) {
case CC_C: POut << "__cdecl "; break;
case CC_X86StdCall: POut << "__stdcall "; break;
case CC_X86FastCall: POut << "__fastcall "; break;
case CC_X86ThisCall: POut << "__thiscall "; break;
case CC_X86VectorCall: POut << "__vectorcall "; break;
+ case CC_X86RegCall: POut << "__regcall "; break;
// Only bother printing the conventions that MSVC knows about.
default: break;
}
@@ -756,33 +782,33 @@ FloatingLiteral::Create(const ASTContext &C, EmptyShell Empty) {
const llvm::fltSemantics &FloatingLiteral::getSemantics() const {
switch(FloatingLiteralBits.Semantics) {
case IEEEhalf:
- return llvm::APFloat::IEEEhalf;
+ return llvm::APFloat::IEEEhalf();
case IEEEsingle:
- return llvm::APFloat::IEEEsingle;
+ return llvm::APFloat::IEEEsingle();
case IEEEdouble:
- return llvm::APFloat::IEEEdouble;
+ return llvm::APFloat::IEEEdouble();
case x87DoubleExtended:
- return llvm::APFloat::x87DoubleExtended;
+ return llvm::APFloat::x87DoubleExtended();
case IEEEquad:
- return llvm::APFloat::IEEEquad;
+ return llvm::APFloat::IEEEquad();
case PPCDoubleDouble:
- return llvm::APFloat::PPCDoubleDouble;
+ return llvm::APFloat::PPCDoubleDouble();
}
llvm_unreachable("Unrecognised floating semantics");
}
void FloatingLiteral::setSemantics(const llvm::fltSemantics &Sem) {
- if (&Sem == &llvm::APFloat::IEEEhalf)
+ if (&Sem == &llvm::APFloat::IEEEhalf())
FloatingLiteralBits.Semantics = IEEEhalf;
- else if (&Sem == &llvm::APFloat::IEEEsingle)
+ else if (&Sem == &llvm::APFloat::IEEEsingle())
FloatingLiteralBits.Semantics = IEEEsingle;
- else if (&Sem == &llvm::APFloat::IEEEdouble)
+ else if (&Sem == &llvm::APFloat::IEEEdouble())
FloatingLiteralBits.Semantics = IEEEdouble;
- else if (&Sem == &llvm::APFloat::x87DoubleExtended)
+ else if (&Sem == &llvm::APFloat::x87DoubleExtended())
FloatingLiteralBits.Semantics = x87DoubleExtended;
- else if (&Sem == &llvm::APFloat::IEEEquad)
+ else if (&Sem == &llvm::APFloat::IEEEquad())
FloatingLiteralBits.Semantics = IEEEquad;
- else if (&Sem == &llvm::APFloat::PPCDoubleDouble)
+ else if (&Sem == &llvm::APFloat::PPCDoubleDouble())
FloatingLiteralBits.Semantics = PPCDoubleDouble;
else
llvm_unreachable("Unknown floating semantics");
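
These hunks track an LLVM API change: the fltSemantics instances moved from static data members to accessor functions. A small sketch of a call site after the change (requires LLVM headers to build):

#include "llvm/ADT/APFloat.h"

double roundTripToDouble(const llvm::APFloat &V) {
  llvm::APFloat Tmp = V;
  bool LosesInfo;
  Tmp.convert(llvm::APFloat::IEEEdouble(), // now a function call
              llvm::APFloat::rmNearestTiesToEven, &LosesInfo);
  return Tmp.convertToDouble();
}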
@@ -794,7 +820,7 @@ void FloatingLiteral::setSemantics(const llvm::fltSemantics &Sem) {
double FloatingLiteral::getValueAsApproximateDouble() const {
llvm::APFloat V = getValue();
bool ignored;
- V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ V.convert(llvm::APFloat::IEEEdouble(), llvm::APFloat::rmNearestTiesToEven,
&ignored);
return V.convertToDouble();
}
@@ -832,9 +858,9 @@ StringLiteral *StringLiteral::Create(const ASTContext &C, StringRef Str,
// Allocate enough space for the StringLiteral plus an array of locations for
// any concatenated string tokens.
- void *Mem = C.Allocate(sizeof(StringLiteral)+
- sizeof(SourceLocation)*(NumStrs-1),
- llvm::alignOf<StringLiteral>());
+ void *Mem =
+ C.Allocate(sizeof(StringLiteral) + sizeof(SourceLocation) * (NumStrs - 1),
+ alignof(StringLiteral));
StringLiteral *SL = new (Mem) StringLiteral(Ty);
// OPTIMIZE: could allocate this appended to the StringLiteral.
@@ -850,9 +876,9 @@ StringLiteral *StringLiteral::Create(const ASTContext &C, StringRef Str,
StringLiteral *StringLiteral::CreateEmpty(const ASTContext &C,
unsigned NumStrs) {
- void *Mem = C.Allocate(sizeof(StringLiteral)+
- sizeof(SourceLocation)*(NumStrs-1),
- llvm::alignOf<StringLiteral>());
+ void *Mem =
+ C.Allocate(sizeof(StringLiteral) + sizeof(SourceLocation) * (NumStrs - 1),
+ alignof(StringLiteral));
StringLiteral *SL = new (Mem) StringLiteral(QualType());
SL->CharByteWidth = 0;
SL->Length = 0;
@@ -944,10 +970,13 @@ void StringLiteral::outputString(raw_ostream &OS) const {
// Handle some common non-printable cases to make dumps prettier.
case '\\': OS << "\\\\"; break;
case '"': OS << "\\\""; break;
- case '\n': OS << "\\n"; break;
- case '\t': OS << "\\t"; break;
case '\a': OS << "\\a"; break;
case '\b': OS << "\\b"; break;
+ case '\f': OS << "\\f"; break;
+ case '\n': OS << "\\n"; break;
+ case '\r': OS << "\\r"; break;
+ case '\t': OS << "\\t"; break;
+ case '\v': OS << "\\v"; break;
}
}
OS << '"';
@@ -1182,8 +1211,16 @@ void CallExpr::updateDependenciesFromArg(Expr *Arg) {
ExprBits.ContainsUnexpandedParameterPack = true;
}
+FunctionDecl *CallExpr::getDirectCallee() {
+ return dyn_cast_or_null<FunctionDecl>(getCalleeDecl());
+}
+
Decl *CallExpr::getCalleeDecl() {
- Expr *CEE = getCallee()->IgnoreParenImpCasts();
+ return getCallee()->getReferencedDeclOfCallee();
+}
+
+Decl *Expr::getReferencedDeclOfCallee() {
+ Expr *CEE = IgnoreParenImpCasts();
while (SubstNonTypeTemplateParmExpr *NTTP
= dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
@@ -1206,10 +1243,6 @@ Decl *CallExpr::getCalleeDecl() {
return nullptr;
}
-FunctionDecl *CallExpr::getDirectCallee() {
- return dyn_cast_or_null<FunctionDecl>(getCalleeDecl());
-}
-
/// setNumArgs - This changes the number of arguments present in this call.
/// Any orphaned expressions are deleted by this, and any new operands are set
/// to null.
@@ -1417,7 +1450,7 @@ MemberExpr *MemberExpr::Create(
HasTemplateKWAndArgsInfo ? 1 : 0,
targs ? targs->size() : 0);
- void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
+ void *Mem = C.Allocate(Size, alignof(MemberExpr));
MemberExpr *E = new (Mem)
MemberExpr(base, isarrow, OperatorLoc, memberdecl, nameinfo, ty, vk, ok);
@@ -1570,6 +1603,8 @@ bool CastExpr::CastConsistency() const {
case CK_ARCReclaimReturnedObject:
case CK_ARCExtendBlockObject:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
+ case CK_IntToOCLSampler:
assert(!getType()->isBooleanType() && "unheralded conversion to bool");
goto CheckNoBasePath;
@@ -1830,6 +1865,24 @@ bool InitListExpr::isStringLiteralInit() const {
return isa<StringLiteral>(Init) || isa<ObjCEncodeExpr>(Init);
}
+bool InitListExpr::isTransparent() const {
+ assert(isSemanticForm() && "syntactic form never semantically transparent");
+
+ // A glvalue InitListExpr is always just sugar.
+ if (isGLValue()) {
+ assert(getNumInits() == 1 && "multiple inits in glvalue init list");
+ return true;
+ }
+
+ // Otherwise, we're sugar if and only if we have exactly one initializer that
+ // is of the same type.
+ if (getNumInits() != 1 || !getInit(0))
+ return false;
+
+ return getType().getCanonicalType() ==
+ getInit(0)->getType().getCanonicalType();
+}
+
SourceLocation InitListExpr::getLocStart() const {
if (InitListExpr *SyntacticForm = getSyntacticForm())
return SyntacticForm->getLocStart();
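
Plain C++ illustrating what isTransparent() asks of a semantic init list, namely exactly one initializer whose type already matches the list's own type (example only, not from the patch):

struct S { int x; };

int a = {42};   // transparent: one initializer of type int, list type int
S b = {42};     // not transparent: the initializer is for the member
S c = {S{42}};  // transparent: the single initializer already has type S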
@@ -2212,12 +2265,15 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
// effects (e.g. a placement new with an uninitialized POD).
case CXXDeleteExprClass:
return false;
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
+ ->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case CXXBindTemporaryExprClass:
- return (cast<CXXBindTemporaryExpr>(this)
- ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
+ return cast<CXXBindTemporaryExpr>(this)->getSubExpr()
+ ->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
case ExprWithCleanupsClass:
- return (cast<ExprWithCleanups>(this)
- ->getSubExpr()->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx));
+ return cast<ExprWithCleanups>(this)->getSubExpr()
+ ->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
}
@@ -2748,7 +2804,8 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
CE->getCastKind() == CK_ToUnion ||
CE->getCastKind() == CK_ConstructorConversion ||
CE->getCastKind() == CK_NonAtomicToAtomic ||
- CE->getCastKind() == CK_AtomicToNonAtomic)
+ CE->getCastKind() == CK_AtomicToNonAtomic ||
+ CE->getCastKind() == CK_IntToOCLSampler)
return CE->getSubExpr()->isConstantInitializer(Ctx, false, Culprit);
break;
@@ -2843,6 +2900,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case UnaryExprOrTypeTraitExprClass:
case AddrLabelExprClass:
case GNUNullExprClass:
+ case ArrayInitIndexExprClass:
case NoInitExprClass:
case CXXBoolLiteralExprClass:
case CXXNullPtrLiteralExprClass:
@@ -2919,6 +2977,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case ExtVectorElementExprClass:
case DesignatedInitExprClass:
case DesignatedInitUpdateExprClass:
+ case ArrayInitLoopExprClass:
case ParenListExprClass:
case CXXPseudoDestructorExprClass:
case CXXStdInitializerListExprClass:
@@ -3307,11 +3366,16 @@ FieldDecl *Expr::getSourceBitField() {
if (Ivar->isBitField())
return Ivar;
- if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E))
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E)) {
if (FieldDecl *Field = dyn_cast<FieldDecl>(DeclRef->getDecl()))
if (Field->isBitField())
return Field;
+ if (BindingDecl *BD = dyn_cast<BindingDecl>(DeclRef->getDecl()))
+ if (Expr *E = BD->getBinding())
+ return E->getSourceBitField();
+ }
+
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E)) {
if (BinOp->isAssignmentOp() && BinOp->getLHS())
return BinOp->getLHS()->getSourceBitField();
@@ -3328,6 +3392,7 @@ FieldDecl *Expr::getSourceBitField() {
}
bool Expr::refersToVectorElement() const {
+ // FIXME: Why do we not just look at the ObjectKind here?
const Expr *E = this->IgnoreParens();
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
@@ -3344,6 +3409,11 @@ bool Expr::refersToVectorElement() const {
if (isa<ExtVectorElementExpr>(E))
return true;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E))
+ if (auto *BD = dyn_cast<BindingDecl>(DRE->getDecl()))
+ if (auto *E = BD->getBinding())
+ return E->refersToVectorElement();
+
return false;
}
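
Both hunks above forward through BindingDecls introduced by C++17 structured bindings, since a DeclRefExpr can now name a binding whose underlying expression is a bit-field (or a vector element). A small example of the source pattern, assuming -std=c++1z:

struct Flags { unsigned a : 3; unsigned b : 5; };

int main() {
  Flags f{1, 2};
  auto &[x, y] = f; // x and y are BindingDecls referring to bit-fields
  x = 5;
  return f.a == 5 ? 0 : 1;
}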
@@ -3396,8 +3466,11 @@ bool ExtVectorElementExpr::containsDuplicateElements() const {
void ExtVectorElementExpr::getEncodedElementAccess(
SmallVectorImpl<uint32_t> &Elts) const {
StringRef Comp = Accessor->getName();
- if (Comp[0] == 's' || Comp[0] == 'S')
+ bool isNumericAccessor = false;
+ if (Comp[0] == 's' || Comp[0] == 'S') {
Comp = Comp.substr(1);
+ isNumericAccessor = true;
+ }
bool isHi = Comp == "hi";
bool isLo = Comp == "lo";
@@ -3416,7 +3489,7 @@ void ExtVectorElementExpr::getEncodedElementAccess(
else if (isOdd)
Index = 2 * i + 1;
else
- Index = ExtVectorType::getAccessorIdx(Comp[i]);
+ Index = ExtVectorType::getAccessorIdx(Comp[i], isNumericAccessor);
Elts.push_back(Index);
}
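
The new flag distinguishes OpenCL-style numeric accessors from the letter sets: after an 's'/'S' prefix the components are hex digits, so 'a' selects lane 10, while in the rgba spelling 'a' is the alpha channel (lane 3). An example using clang's ext_vector_type extension:

typedef float float16 __attribute__((ext_vector_type(16)));
typedef float float4 __attribute__((ext_vector_type(4)));

float lane10(float16 v) { return v.sa; } // numeric accessor: hex 'a' = 10
float alpha(float4 v) { return v.a; }    // rgba accessor: 'a' = lane 3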
@@ -3589,7 +3662,7 @@ DesignatedInitExpr::Create(const ASTContext &C,
SourceLocation ColonOrEqualLoc,
bool UsesColonSyntax, Expr *Init) {
void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(IndexExprs.size() + 1),
- llvm::alignOf<DesignatedInitExpr>());
+ alignof(DesignatedInitExpr));
return new (Mem) DesignatedInitExpr(C, C.VoidTy, Designators,
ColonOrEqualLoc, UsesColonSyntax,
IndexExprs, Init);
@@ -3598,7 +3671,7 @@ DesignatedInitExpr::Create(const ASTContext &C,
DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(const ASTContext &C,
unsigned NumIndexExprs) {
void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(NumIndexExprs + 1),
- llvm::alignOf<DesignatedInitExpr>());
+ alignof(DesignatedInitExpr));
return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
}
@@ -3738,7 +3811,7 @@ PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &Context,
unsigned numSemanticExprs) {
void *buffer =
Context.Allocate(totalSizeToAlloc<Expr *>(1 + numSemanticExprs),
- llvm::alignOf<PseudoObjectExpr>());
+ alignof(PseudoObjectExpr));
return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
}
@@ -3766,7 +3839,7 @@ PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
}
void *buffer = C.Allocate(totalSizeToAlloc<Expr *>(semantics.size() + 1),
- llvm::alignOf<PseudoObjectExpr>());
+ alignof(PseudoObjectExpr));
return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
resultIndex);
}
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index a13033d47467..ad510e0070e6 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -25,6 +25,22 @@ using namespace clang;
// Child Iterators for iterating over subexpressions/substatements
//===----------------------------------------------------------------------===//
+bool CXXOperatorCallExpr::isInfixBinaryOp() const {
+ // An infix binary operator is any operator with two arguments other than
+ // operator() and operator[]. Note that none of these operators can have
+ // default arguments, so it suffices to check the number of argument
+ // expressions.
+ if (getNumArgs() != 2)
+ return false;
+
+ switch (getOperator()) {
+ case OO_Call: case OO_Subscript:
+ return false;
+ default:
+ return true;
+ }
+}
+
bool CXXTypeidExpr::isPotentiallyEvaluated() const {
if (isTypeOperand())
return false;
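
What the new predicate classifies, in plain C++: an overloaded operator call with two argument expressions is infix unless the operator is () or [] (example only):

struct V {
  int v;
  V operator+(const V &o) const { return {v + o.v}; }
  int operator[](int i) const { return v + i; }
};

int main() {
  V a{1}, b{2};
  V c = a + b;  // CXXOperatorCallExpr, 2 args: isInfixBinaryOp() == true
  int d = a[3]; // also 2 args (object + index), but OO_Subscript: false
  return c.v + d == 7 ? 0 : 1;
}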
@@ -62,7 +78,7 @@ SourceLocation CXXScalarValueInitExpr::getLocStart() const {
// CXXNewExpr
CXXNewExpr::CXXNewExpr(const ASTContext &C, bool globalNew,
FunctionDecl *operatorNew, FunctionDecl *operatorDelete,
- bool usualArrayDeleteWantsSize,
+ bool PassAlignment, bool usualArrayDeleteWantsSize,
ArrayRef<Expr*> placementArgs,
SourceRange typeIdParens, Expr *arraySize,
InitializationStyle initializationStyle,
@@ -76,7 +92,8 @@ CXXNewExpr::CXXNewExpr(const ASTContext &C, bool globalNew,
SubExprs(nullptr), OperatorNew(operatorNew), OperatorDelete(operatorDelete),
AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
Range(Range), DirectInitRange(directInitRange),
- GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
+ GlobalNew(globalNew), PassAlignment(PassAlignment),
+ UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
assert((initializer != nullptr || initializationStyle == NoInit) &&
"Only NoInit can have no initializer.");
StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
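
PassAlignment records whether the new-expression passes a C++17 std::align_val_t argument, which happens when the allocated type is over-aligned. A sketch of the source-level behavior, assuming a compiler and library with aligned allocation enabled (-std=c++1z):

#include <new>

struct alignas(64) Wide { float lanes[16]; };

int main() {
  // Selects operator new(std::size_t, std::align_val_t) for Wide.
  Wide *W = new Wide;
  delete W;
}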
@@ -226,7 +243,7 @@ UnresolvedLookupExpr::Create(const ASTContext &C,
std::size_t Size =
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(1,
num_args);
- void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedLookupExpr>());
+ void *Mem = C.Allocate(Size, alignof(UnresolvedLookupExpr));
return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
TemplateKWLoc, NameInfo,
ADL, /*Overload*/ true, Args,
@@ -241,7 +258,7 @@ UnresolvedLookupExpr::CreateEmpty(const ASTContext &C,
std::size_t Size =
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedLookupExpr>());
+ void *Mem = C.Allocate(Size, alignof(UnresolvedLookupExpr));
UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
@@ -284,9 +301,8 @@ OverloadExpr::OverloadExpr(StmtClass K, const ASTContext &C,
}
}
- Results = static_cast<DeclAccessPair *>(
- C.Allocate(sizeof(DeclAccessPair) * NumResults,
- llvm::alignOf<DeclAccessPair>()));
+ Results = static_cast<DeclAccessPair *>(C.Allocate(
+ sizeof(DeclAccessPair) * NumResults, alignof(DeclAccessPair)));
memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
}
@@ -323,11 +339,11 @@ void OverloadExpr::initializeResults(const ASTContext &C,
assert(!Results && "Results already initialized!");
NumResults = End - Begin;
if (NumResults) {
-    Results = static_cast<DeclAccessPair *>(
-        C.Allocate(sizeof(DeclAccessPair) * NumResults,
-                   llvm::alignOf<DeclAccessPair>()));
-    memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
+    Results = static_cast<DeclAccessPair *>(
+        C.Allocate(sizeof(DeclAccessPair) * NumResults,
+                   alignof(DeclAccessPair)));
+    memcpy(Results, Begin.I, NumResults * sizeof(DeclAccessPair));
}
}
@@ -853,8 +869,6 @@ LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
SourceLocation CaptureDefaultLoc,
ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
- ArrayRef<VarDecl *> ArrayIndexVars,
- ArrayRef<unsigned> ArrayIndexStarts,
SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack)
: Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary, T->isDependentType(),
@@ -891,17 +905,6 @@ LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
// Copy the body of the lambda.
*Stored++ = getCallOperator()->getBody();
-
- // Copy the array index variables, if any.
- HasArrayIndexVars = !ArrayIndexVars.empty();
- if (HasArrayIndexVars) {
- assert(ArrayIndexStarts.size() == NumCaptures);
- memcpy(getArrayIndexVars(), ArrayIndexVars.data(),
- sizeof(VarDecl *) * ArrayIndexVars.size());
- memcpy(getArrayIndexStarts(), ArrayIndexStarts.data(),
- sizeof(unsigned) * Captures.size());
- getArrayIndexStarts()[Captures.size()] = ArrayIndexVars.size();
- }
}
LambdaExpr *LambdaExpr::Create(
@@ -909,31 +912,24 @@ LambdaExpr *LambdaExpr::Create(
SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
bool ExplicitParams, bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
- ArrayRef<VarDecl *> ArrayIndexVars, ArrayRef<unsigned> ArrayIndexStarts,
SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack) {
// Determine the type of the expression (i.e., the type of the
// function object we're creating).
QualType T = Context.getTypeDeclType(Class);
- unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
- Captures.size() + 1, ArrayIndexVars.empty() ? 0 : Captures.size() + 1,
- ArrayIndexVars.size());
+ unsigned Size = totalSizeToAlloc<Stmt *>(Captures.size() + 1);
void *Mem = Context.Allocate(Size);
- return new (Mem) LambdaExpr(T, IntroducerRange,
- CaptureDefault, CaptureDefaultLoc, Captures,
- ExplicitParams, ExplicitResultType,
- CaptureInits, ArrayIndexVars, ArrayIndexStarts,
- ClosingBrace, ContainsUnexpandedParameterPack);
+ return new (Mem)
+ LambdaExpr(T, IntroducerRange, CaptureDefault, CaptureDefaultLoc,
+ Captures, ExplicitParams, ExplicitResultType, CaptureInits,
+ ClosingBrace, ContainsUnexpandedParameterPack);
}
LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
- unsigned NumCaptures,
- unsigned NumArrayIndexVars) {
- unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
- NumCaptures + 1, NumArrayIndexVars ? NumCaptures + 1 : 0,
- NumArrayIndexVars);
+ unsigned NumCaptures) {
+ unsigned Size = totalSizeToAlloc<Stmt *>(NumCaptures + 1);
void *Mem = C.Allocate(Size);
- return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
+ return new (Mem) LambdaExpr(EmptyShell(), NumCaptures);
}
bool LambdaExpr::isInitCapture(const LambdaCapture *C) const {
@@ -979,19 +975,6 @@ LambdaExpr::capture_range LambdaExpr::implicit_captures() const {
return capture_range(implicit_capture_begin(), implicit_capture_end());
}
-ArrayRef<VarDecl *>
-LambdaExpr::getCaptureInitIndexVars(const_capture_init_iterator Iter) const {
- assert(HasArrayIndexVars && "No array index-var data?");
-
- unsigned Index = Iter - capture_init_begin();
- assert(Index < getLambdaClass()->getLambdaData().NumCaptures &&
- "Capture index out-of-range");
- VarDecl *const *IndexVars = getArrayIndexVars();
- const unsigned *IndexStarts = getArrayIndexStarts();
- return llvm::makeArrayRef(IndexVars + IndexStarts[Index],
- IndexVars + IndexStarts[Index + 1]);
-}
-
CXXRecordDecl *LambdaExpr::getLambdaClass() const {
return getType()->getAsCXXRecordDecl();
}
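
The deleted machinery existed to model element-wise copies of arrays captured by value; those captures are now represented with an ArrayInitLoopExpr in the capture initializer, so the per-capture index-variable tables can go. The source pattern that used to need them:

int main() {
  int arr[4] = {1, 2, 3, 4};
  auto l = [arr] { return arr[2]; }; // element-wise copy of 'arr'
  return l() == 3 ? 0 : 1;
}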
@@ -1041,7 +1024,7 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr,
bool CleanupsHaveSideEffects,
ArrayRef<CleanupObject> objects) {
void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(objects.size()),
- llvm::alignOf<ExprWithCleanups>());
+ alignof(ExprWithCleanups));
return new (buffer)
ExprWithCleanups(subexpr, CleanupsHaveSideEffects, objects);
}
@@ -1055,7 +1038,7 @@ ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
EmptyShell empty,
unsigned numObjects) {
void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(numObjects),
- llvm::alignOf<ExprWithCleanups>());
+ alignof(ExprWithCleanups));
return new (buffer) ExprWithCleanups(empty, numObjects);
}
@@ -1154,7 +1137,7 @@ CXXDependentScopeMemberExpr::Create(const ASTContext &C,
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(Size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ void *Mem = C.Allocate(Size, alignof(CXXDependentScopeMemberExpr));
return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
IsArrow, OperatorLoc,
QualifierLoc,
@@ -1171,7 +1154,7 @@ CXXDependentScopeMemberExpr::CreateEmpty(const ASTContext &C,
std::size_t Size =
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(Size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ void *Mem = C.Allocate(Size, alignof(CXXDependentScopeMemberExpr));
CXXDependentScopeMemberExpr *E
= new (Mem) CXXDependentScopeMemberExpr(C, nullptr, QualType(),
0, SourceLocation(),
@@ -1255,7 +1238,7 @@ UnresolvedMemberExpr *UnresolvedMemberExpr::Create(
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, TemplateArgs ? TemplateArgs->size() : 0);
- void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedMemberExpr>());
+ void *Mem = C.Allocate(Size, alignof(UnresolvedMemberExpr));
return new (Mem) UnresolvedMemberExpr(
C, HasUnresolvedUsing, Base, BaseType, IsArrow, OperatorLoc, QualifierLoc,
TemplateKWLoc, MemberNameInfo, TemplateArgs, Begin, End);
@@ -1270,7 +1253,7 @@ UnresolvedMemberExpr::CreateEmpty(const ASTContext &C,
totalSizeToAlloc<ASTTemplateKWAndArgsInfo, TemplateArgumentLoc>(
HasTemplateKWAndArgsInfo, NumTemplateArgs);
- void *Mem = C.Allocate(Size, llvm::alignOf<UnresolvedMemberExpr>());
+ void *Mem = C.Allocate(Size, alignof(UnresolvedMemberExpr));
UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
return E;
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index 89cc9bc18ef0..adb74b80b198 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -141,10 +141,9 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return Cl::CL_LValue;
// C99 6.5.2.5p5 says that compound literals are lvalues.
- // In C++, they're prvalue temporaries.
+ // In C++, they're prvalue temporaries, except for file-scope arrays.
case Expr::CompoundLiteralExprClass:
- return Ctx.getLangOpts().CPlusPlus ? ClassifyTemporary(E->getType())
- : Cl::CL_LValue;
+ return !E->isLValue() ? ClassifyTemporary(E->getType()) : Cl::CL_LValue;
// Expressions that are prvalues.
case Expr::CXXBoolLiteralExprClass:
@@ -186,6 +185,8 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::ObjCIndirectCopyRestoreExprClass:
case Expr::AtomicExprClass:
case Expr::CXXFoldExprClass:
+ case Expr::ArrayInitLoopExprClass:
+ case Expr::ArrayInitIndexExprClass:
case Expr::NoInitExprClass:
case Expr::DesignatedInitUpdateExprClass:
case Expr::CoyieldExprClass:
@@ -196,11 +197,20 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
return ClassifyInternal(Ctx,
cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
- // C++ [expr.sub]p1: The result is an lvalue of type "T".
- // However, subscripting vector types is more like member access.
+ // C, C++98 [expr.sub]p1: The result is an lvalue of type "T".
+ // C++11 (DR1213): in the case of an array operand, the result is an lvalue
+ // if that operand is an lvalue and an xvalue otherwise.
+ // Subscripting vector types is more like member access.
case Expr::ArraySubscriptExprClass:
if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType())
return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase());
+ if (Lang.CPlusPlus11) {
+ // Step over the array-to-pointer decay if present, but not over the
+ // temporary materialization.
+ auto *Base = cast<ArraySubscriptExpr>(E)->getBase()->IgnoreImpCasts();
+ if (Base->getType()->isArrayType())
+ return ClassifyInternal(Ctx, Base);
+ }
return Cl::CL_LValue;
// C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
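
A short example of the C++11 rule (CWG DR1213) the new branch implements: subscripting an array rvalue yields an xvalue, which is why it can bind to an rvalue reference:

struct A { int arr[4]; };
A make() { return {{1, 2, 3, 4}}; }

int main() {
  int &&r = make().arr[0]; // xvalue under DR1213; lifetime is extended
  return r == 1 ? 0 : 1;
}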
@@ -429,6 +439,7 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
else
islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
isa<IndirectFieldDecl>(D) ||
+ isa<BindingDecl>(D) ||
(Ctx.getLangOpts().CPlusPlus &&
(isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
isa<FunctionTemplateDecl>(D)));
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index df944e8f25f2..b3f8925b6464 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -36,6 +36,7 @@
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ASTLambda.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
@@ -43,7 +44,6 @@
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <functional>
@@ -76,8 +76,8 @@ namespace {
const Expr *Inner = Temp->skipRValueSubobjectAdjustments(CommaLHSs,
Adjustments);
// Keep any cv-qualifiers from the reference if we generated a temporary
- // for it.
- if (Inner != Temp)
+ // for it directly. Otherwise use the type after adjustment.
+ if (!Adjustments.empty())
return Inner->getType();
}
@@ -109,19 +109,57 @@ namespace {
return getAsBaseOrMember(E).getInt();
}
+ /// Given a CallExpr, try to get the alloc_size attribute. May return null.
+ static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
+ const FunctionDecl *Callee = CE->getDirectCallee();
+ return Callee ? Callee->getAttr<AllocSizeAttr>() : nullptr;
+ }
+
+ /// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
+ /// This will look through a single cast.
+ ///
+ /// Returns null if we couldn't unwrap a function with alloc_size.
+ static const CallExpr *tryUnwrapAllocSizeCall(const Expr *E) {
+ if (!E->getType()->isPointerType())
+ return nullptr;
+
+ E = E->IgnoreParens();
+ // If we're doing a variable assignment from e.g. malloc(N), there will
+ // probably be a cast of some kind. Ignore it.
+ if (const auto *Cast = dyn_cast<CastExpr>(E))
+ E = Cast->getSubExpr()->IgnoreParens();
+
+ if (const auto *CE = dyn_cast<CallExpr>(E))
+ return getAllocSizeAttr(CE) ? CE : nullptr;
+ return nullptr;
+ }
+
+ /// Determines whether or not the given Base contains a call to a function
+ /// with the alloc_size attribute.
+ static bool isBaseAnAllocSizeCall(APValue::LValueBase Base) {
+ const auto *E = Base.dyn_cast<const Expr *>();
+ return E && E->getType()->isPointerType() && tryUnwrapAllocSizeCall(E);
+ }
+
+ /// Determines if an LValue with the given LValueBase will have an unsized
+ /// array in its designator.
/// Find the path length and type of the most-derived subobject in the given
/// path, and find the size of the containing array, if any.
- static
- unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
- ArrayRef<APValue::LValuePathEntry> Path,
- uint64_t &ArraySize, QualType &Type,
- bool &IsArray) {
+ static unsigned
+ findMostDerivedSubobject(ASTContext &Ctx, APValue::LValueBase Base,
+ ArrayRef<APValue::LValuePathEntry> Path,
+ uint64_t &ArraySize, QualType &Type, bool &IsArray) {
+ // This only accepts LValueBases from APValues, and APValues don't support
+ // arrays that lack size info.
+ assert(!isBaseAnAllocSizeCall(Base) &&
+ "Unsized arrays shouldn't appear here");
unsigned MostDerivedLength = 0;
- Type = Base;
+ Type = getType(Base);
+
for (unsigned I = 0, N = Path.size(); I != N; ++I) {
if (Type->isArrayType()) {
const ConstantArrayType *CAT =
- cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+ cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
Type = CAT->getElementType();
ArraySize = CAT->getSize().getZExtValue();
MostDerivedLength = I + 1;
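
For reference, the attribute these helpers read, with hypothetical allocator names; alloc_size arguments are 1-based parameter positions:

void *my_malloc(unsigned long n) __attribute__((alloc_size(1)));
void *my_calloc(unsigned long n, unsigned long size)
    __attribute__((alloc_size(1, 2)));

// With this patch the constant evaluator can fold, for example,
//   __builtin_object_size(my_malloc(16), 0)   to 16
//   __builtin_object_size(my_calloc(4, 8), 0) to 32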
@@ -162,17 +200,23 @@ namespace {
/// Is this a pointer one past the end of an object?
unsigned IsOnePastTheEnd : 1;
+ /// Indicator of whether the first entry is an unsized array.
+ unsigned FirstEntryIsAnUnsizedArray : 1;
+
/// Indicator of whether the most-derived object is an array element.
unsigned MostDerivedIsArrayElement : 1;
/// The length of the path to the most-derived object of which this is a
/// subobject.
- unsigned MostDerivedPathLength : 29;
+ unsigned MostDerivedPathLength : 28;
/// The size of the array of which the most-derived object is an element.
/// This will always be 0 if the most-derived object is not an array
/// element. 0 is not an indicator of whether or not the most-derived object
/// is an array, however, because 0-length arrays are allowed.
+ ///
+ /// If the current array is an unsized array, the value of this is
+ /// undefined.
uint64_t MostDerivedArraySize;
/// The type of the most derived object referred to by this address.
@@ -187,23 +231,24 @@ namespace {
explicit SubobjectDesignator(QualType T)
: Invalid(false), IsOnePastTheEnd(false),
- MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
- MostDerivedArraySize(0), MostDerivedType(T) {}
+ FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
+ MostDerivedPathLength(0), MostDerivedArraySize(0),
+ MostDerivedType(T) {}
SubobjectDesignator(ASTContext &Ctx, const APValue &V)
: Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
- MostDerivedIsArrayElement(false), MostDerivedPathLength(0),
- MostDerivedArraySize(0) {
+ FirstEntryIsAnUnsizedArray(false), MostDerivedIsArrayElement(false),
+ MostDerivedPathLength(0), MostDerivedArraySize(0) {
+ assert(V.isLValue() && "Non-LValue used to make an LValue designator?");
if (!Invalid) {
IsOnePastTheEnd = V.isLValueOnePastTheEnd();
ArrayRef<PathEntry> VEntries = V.getLValuePath();
Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
if (V.getLValueBase()) {
bool IsArray = false;
- MostDerivedPathLength =
- findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
- V.getLValuePath(), MostDerivedArraySize,
- MostDerivedType, IsArray);
+ MostDerivedPathLength = findMostDerivedSubobject(
+ Ctx, V.getLValueBase(), V.getLValuePath(), MostDerivedArraySize,
+ MostDerivedType, IsArray);
MostDerivedIsArrayElement = IsArray;
}
}
@@ -214,12 +259,26 @@ namespace {
Entries.clear();
}
+ /// Determine whether the most derived subobject is an array without a
+ /// known bound.
+ bool isMostDerivedAnUnsizedArray() const {
+ assert(!Invalid && "Calling this makes no sense on invalid designators");
+ return Entries.size() == 1 && FirstEntryIsAnUnsizedArray;
+ }
+
+ /// Determine what the most derived array's size is. Results in an assertion
+ /// failure if the most derived array lacks a size.
+ uint64_t getMostDerivedArraySize() const {
+ assert(!isMostDerivedAnUnsizedArray() && "Unsized array has no size");
+ return MostDerivedArraySize;
+ }
+
/// Determine whether this is a one-past-the-end pointer.
bool isOnePastTheEnd() const {
assert(!Invalid);
if (IsOnePastTheEnd)
return true;
- if (MostDerivedIsArrayElement &&
+ if (!isMostDerivedAnUnsizedArray() && MostDerivedIsArrayElement &&
Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
return true;
return false;
@@ -247,6 +306,21 @@ namespace {
MostDerivedArraySize = CAT->getSize().getZExtValue();
MostDerivedPathLength = Entries.size();
}
+ /// Update this designator to refer to the first element within the array of
+ /// elements of type T. This is an array of unknown size.
+ void addUnsizedArrayUnchecked(QualType ElemTy) {
+ PathEntry Entry;
+ Entry.ArrayIndex = 0;
+ Entries.push_back(Entry);
+
+ MostDerivedType = ElemTy;
+ MostDerivedIsArrayElement = true;
+ // The value in MostDerivedArraySize is undefined in this case. So, set it
+ // to an arbitrary value that's likely to loudly break things if it's
+ // used.
+ MostDerivedArraySize = std::numeric_limits<uint64_t>::max() / 2;
+ MostDerivedPathLength = Entries.size();
+ }
/// Update this designator to refer to the given base or member of this
/// object.
void addDeclUnchecked(const Decl *D, bool Virtual = false) {
@@ -280,10 +354,16 @@ namespace {
/// Add N to the address of this subobject.
void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
if (Invalid) return;
+ if (isMostDerivedAnUnsizedArray()) {
+ // Can't verify -- trust that the user is doing the right thing (or if
+ // not, trust that the caller will catch the bad behavior).
+ Entries.back().ArrayIndex += N;
+ return;
+ }
if (MostDerivedPathLength == Entries.size() &&
MostDerivedIsArrayElement) {
Entries.back().ArrayIndex += N;
- if (Entries.back().ArrayIndex > MostDerivedArraySize) {
+ if (Entries.back().ArrayIndex > getMostDerivedArraySize()) {
diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
setInvalid();
}
@@ -310,15 +390,9 @@ namespace {
/// Parent - The caller of this stack frame.
CallStackFrame *Caller;
- /// CallLoc - The location of the call expression for this call.
- SourceLocation CallLoc;
-
/// Callee - The function which was called.
const FunctionDecl *Callee;
- /// Index - The call index of this call.
- unsigned Index;
-
/// This - The binding for the this pointer in this call, if any.
const LValue *This;
@@ -333,6 +407,12 @@ namespace {
/// Temporaries - Temporary lvalues materialized within this stack frame.
MapTy Temporaries;
+ /// CallLoc - The location of the call expression for this call.
+ SourceLocation CallLoc;
+
+ /// Index - The call index of this call.
+ unsigned Index;
+
CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
APValue *Arguments);
@@ -433,7 +513,7 @@ namespace {
/// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
/// evaluate the expression regardless of what the RHS is, but C only allows
/// certain things in certain situations.
- struct EvalInfo {
+ struct LLVM_ALIGNAS(/*alignof(uint64_t)*/ 8) EvalInfo {
ASTContext &Ctx;
/// EvalStatus - Contains information about the evaluation.
@@ -469,6 +549,10 @@ namespace {
/// declaration whose initializer is being evaluated, if any.
APValue *EvaluatingDeclValue;
+ /// The current array initialization index, if we're performing array
+ /// initialization.
+ uint64_t ArrayInitIndex = -1;
+
/// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
/// notes attached to it will also be stored, otherwise they will not be.
bool HasActiveDiagnostic;
@@ -520,9 +604,15 @@ namespace {
/// gets a chance to look at it.
EM_PotentialConstantExpressionUnevaluated,
- /// Evaluate as a constant expression. Continue evaluating if we find a
- /// MemberExpr with a base that can't be evaluated.
- EM_DesignatorFold,
+ /// Evaluate as a constant expression. Continue evaluating if either:
+ /// - We find a MemberExpr with a base that can't be evaluated.
+ /// - We find a variable initialized with a call to a function that has
+ /// the alloc_size attribute on it.
+ /// In either case, the LValue returned shall have an invalid base; in the
+ /// former, the base will be the invalid MemberExpr, in the latter, the
+ /// base will be either the alloc_size CallExpr or a CastExpr wrapping
+ /// said CallExpr.
+ EM_OffsetFold,
} EvalMode;
/// Are we checking whether the expression is a potential constant
@@ -624,7 +714,7 @@ namespace {
case EM_PotentialConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_PotentialConstantExpressionUnevaluated:
- case EM_DesignatorFold:
+ case EM_OffsetFold:
HasActiveDiagnostic = false;
return OptionalDiagnostic();
}
@@ -716,7 +806,7 @@ namespace {
case EM_ConstantExpression:
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
- case EM_DesignatorFold:
+ case EM_OffsetFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
@@ -735,7 +825,7 @@ namespace {
case EM_EvaluateForOverflow:
case EM_IgnoreSideEffects:
case EM_ConstantFold:
- case EM_DesignatorFold:
+ case EM_OffsetFold:
return true;
case EM_PotentialConstantExpression:
@@ -771,7 +861,7 @@ namespace {
case EM_ConstantExpressionUnevaluated:
case EM_ConstantFold:
case EM_IgnoreSideEffects:
- case EM_DesignatorFold:
+ case EM_OffsetFold:
return false;
}
llvm_unreachable("Missed EvalMode case");
@@ -787,7 +877,7 @@ namespace {
/// (Foo(), 1) // use noteSideEffect
/// (Foo() || true) // use noteSideEffect
/// Foo() + 1 // use noteFailure
- LLVM_ATTRIBUTE_UNUSED_RESULT bool noteFailure() {
+ LLVM_NODISCARD bool noteFailure() {
// Failure when evaluating some expression often means there is some
// subexpression whose evaluation was skipped. Therefore, (because we
// don't track whether we skipped an expression when unwinding after an
@@ -801,8 +891,22 @@ namespace {
}
bool allowInvalidBaseExpr() const {
- return EvalMode == EM_DesignatorFold;
+ return EvalMode == EM_OffsetFold;
}
+
+ class ArrayInitLoopIndex {
+ EvalInfo &Info;
+ uint64_t OuterIndex;
+
+ public:
+ ArrayInitLoopIndex(EvalInfo &Info)
+ : Info(Info), OuterIndex(Info.ArrayInitIndex) {
+ Info.ArrayInitIndex = 0;
+ }
+ ~ArrayInitLoopIndex() { Info.ArrayInitIndex = OuterIndex; }
+
+ operator uint64_t&() { return Info.ArrayInitIndex; }
+ };
};
/// Object used to treat all foldable expressions as constant expressions.
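
ArrayInitLoopIndex is a save/zero/restore RAII helper for the nested array-initialization index. A standalone sketch of the same idiom (toy types, not clang code):

#include <cassert>
#include <cstdint>

struct State { uint64_t ArrayInitIndex = ~0ull; };

class ScopedIndex {
  State &S;
  uint64_t Outer;
public:
  explicit ScopedIndex(State &St) : S(St), Outer(St.ArrayInitIndex) {
    S.ArrayInitIndex = 0;
  }
  ~ScopedIndex() { S.ArrayInitIndex = Outer; }
  operator uint64_t &() { return S.ArrayInitIndex; }
};

int main() {
  State S;
  {
    ScopedIndex Idx(S);
    for (uint64_t &I = Idx; I != 3; ++I) {} // nested walk runs 0..2
    assert(S.ArrayInitIndex == 3);
  }
  assert(S.ArrayInitIndex == ~0ull); // outer value restored on scope exit
}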
@@ -838,11 +942,10 @@ namespace {
struct FoldOffsetRAII {
EvalInfo &Info;
EvalInfo::EvaluationMode OldMode;
- explicit FoldOffsetRAII(EvalInfo &Info, bool Subobject)
+ explicit FoldOffsetRAII(EvalInfo &Info)
: Info(Info), OldMode(Info.EvalMode) {
if (!Info.checkingPotentialConstantExpression())
- Info.EvalMode = Subobject ? EvalInfo::EM_DesignatorFold
- : EvalInfo::EM_ConstantFold;
+ Info.EvalMode = EvalInfo::EM_OffsetFold;
}
~FoldOffsetRAII() { Info.EvalMode = OldMode; }
@@ -948,10 +1051,12 @@ bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
const Expr *E, uint64_t N) {
+ // If we're complaining, we must be able to statically determine the size of
+ // the most derived array.
if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
Info.CCEDiag(E, diag::note_constexpr_array_index)
<< static_cast<int>(N) << /*array*/ 0
- << static_cast<unsigned>(MostDerivedArraySize);
+ << static_cast<unsigned>(getMostDerivedArraySize());
else
Info.CCEDiag(E, diag::note_constexpr_array_index)
<< static_cast<int>(N) << /*non-array*/ 1;
@@ -961,8 +1066,8 @@ void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
APValue *Arguments)
- : Info(Info), Caller(Info.CurrentCall), CallLoc(CallLoc), Callee(Callee),
- Index(Info.NextCallIndex++), This(This), Arguments(Arguments) {
+ : Info(Info), Caller(Info.CurrentCall), Callee(Callee), This(This),
+ Arguments(Arguments), CallLoc(CallLoc), Index(Info.NextCallIndex++) {
Info.CurrentCall = this;
++Info.CallStackDepth;
}
@@ -1032,7 +1137,7 @@ namespace {
APSInt IntReal, IntImag;
APFloat FloatReal, FloatImag;
- ComplexValue() : FloatReal(APFloat::Bogus), FloatImag(APFloat::Bogus) {}
+ ComplexValue() : FloatReal(APFloat::Bogus()), FloatImag(APFloat::Bogus()) {}
void makeComplexFloat() { IsInt = false; }
bool isComplexFloat() const { return !IsInt; }
@@ -1070,6 +1175,7 @@ namespace {
unsigned InvalidBase : 1;
unsigned CallIndex : 31;
SubobjectDesignator Designator;
+ bool IsNullPtr;
const APValue::LValueBase getLValueBase() const { return Base; }
CharUnits &getLValueOffset() { return Offset; }
@@ -1077,29 +1183,47 @@ namespace {
unsigned getLValueCallIndex() const { return CallIndex; }
SubobjectDesignator &getLValueDesignator() { return Designator; }
const SubobjectDesignator &getLValueDesignator() const { return Designator;}
+ bool isNullPointer() const { return IsNullPtr;}
void moveInto(APValue &V) const {
if (Designator.Invalid)
- V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex);
- else
+ V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex,
+ IsNullPtr);
+ else {
+ assert(!InvalidBase && "APValues can't handle invalid LValue bases");
+ assert(!Designator.FirstEntryIsAnUnsizedArray &&
+ "Unsized array with a valid base?");
V = APValue(Base, Offset, Designator.Entries,
- Designator.IsOnePastTheEnd, CallIndex);
+ Designator.IsOnePastTheEnd, CallIndex, IsNullPtr);
+ }
}
void setFrom(ASTContext &Ctx, const APValue &V) {
- assert(V.isLValue());
+ assert(V.isLValue() && "Setting LValue from a non-LValue?");
Base = V.getLValueBase();
Offset = V.getLValueOffset();
InvalidBase = false;
CallIndex = V.getLValueCallIndex();
Designator = SubobjectDesignator(Ctx, V);
+ IsNullPtr = V.isNullPointer();
}
- void set(APValue::LValueBase B, unsigned I = 0, bool BInvalid = false) {
+ void set(APValue::LValueBase B, unsigned I = 0, bool BInvalid = false,
+ bool IsNullPtr_ = false, uint64_t Offset_ = 0) {
+#ifndef NDEBUG
+ // We only allow a few types of invalid bases. Enforce that here.
+ if (BInvalid) {
+ const auto *E = B.get<const Expr *>();
+ assert((isa<MemberExpr>(E) || tryUnwrapAllocSizeCall(E)) &&
+ "Unexpected type of invalid base");
+ }
+#endif
+
Base = B;
- Offset = CharUnits::Zero();
+ Offset = CharUnits::fromQuantity(Offset_);
InvalidBase = BInvalid;
CallIndex = I;
Designator = SubobjectDesignator(getType(B));
+ IsNullPtr = IsNullPtr_;
}
void setInvalid(APValue::LValueBase B, unsigned I = 0) {
@@ -1112,7 +1236,7 @@ namespace {
CheckSubobjectKind CSK) {
if (Designator.Invalid)
return false;
- if (!Base) {
+ if (IsNullPtr) {
Info.CCEDiag(E, diag::note_constexpr_null_subobject)
<< CSK;
Designator.setInvalid();
@@ -1133,6 +1257,13 @@ namespace {
if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
Designator.addDeclUnchecked(D, Virtual);
}
+ void addUnsizedArray(EvalInfo &Info, QualType ElemTy) {
+ assert(Designator.Entries.empty() && getType(Base)->isPointerType());
+ assert(isBaseAnAllocSizeCall(Base) &&
+ "Only alloc_size bases can have unsized arrays");
+ Designator.FirstEntryIsAnUnsizedArray = true;
+ Designator.addUnsizedArrayUnchecked(ElemTy);
+ }
void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
if (checkSubobject(Info, E, CSK_ArrayToPointer))
Designator.addArrayUnchecked(CAT);
@@ -1141,9 +1272,22 @@ namespace {
if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
Designator.addComplexUnchecked(EltTy, Imag);
}
- void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
- if (N && checkNullPointer(Info, E, CSK_ArrayIndex))
- Designator.adjustIndex(Info, E, N);
+ void clearIsNullPointer() {
+ IsNullPtr = false;
+ }
+ void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E, uint64_t Index,
+ CharUnits ElementSize) {
+ // Compute the new offset in the appropriate width.
+ Offset += Index * ElementSize;
+ if (Index && checkNullPointer(Info, E, CSK_ArrayIndex))
+ Designator.adjustIndex(Info, E, Index);
+ if (Index)
+ clearIsNullPointer();
+ }
+ void adjustOffset(CharUnits N) {
+ Offset += N;
+ if (N.getQuantity())
+ clearIsNullPointer();
}
};
@@ -2018,7 +2162,7 @@ static bool HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
}
unsigned I = FD->getFieldIndex();
- LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I));
+ LVal.adjustOffset(Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I)));
LVal.addDecl(Info, E, FD);
return true;
}
@@ -2072,9 +2216,7 @@ static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
return false;
- // Compute the new offset in the appropriate width.
- LVal.Offset += Adjustment * SizeOfPointee;
- LVal.adjustIndex(Info, E, Adjustment);
+ LVal.adjustOffsetAndIndex(Info, E, Adjustment, SizeOfPointee);
return true;
}
@@ -2125,7 +2267,22 @@ static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
// If this is a local variable, dig out its value.
if (Frame) {
Result = Frame->getTemporary(VD);
- assert(Result && "missing value for local variable");
+ if (!Result) {
+ // Assume variables referenced within a lambda's call operator that were
+ // not declared within the call operator are captures and during checking
+ // of a potential constant expression, assume they are unknown constant
+ // expressions.
+ assert(isLambdaCallOperator(Frame->Callee) &&
+ (VD->getDeclContext() != Frame->Callee || VD->isInitCapture()) &&
+ "missing value for local variable");
+ if (Info.checkingPotentialConstantExpression())
+ return false;
+ // FIXME: implement capture evaluation during constant expr evaluation.
+ Info.FFDiag(E->getLocStart(),
+ diag::note_unimplemented_constexpr_lambda_feature_ast)
+ << "captures not currently allowed";
+ return false;
+ }
return true;
}
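
The new diagnostic covers constexpr lambdas (C++1z): a capture-free lambda body can already be evaluated, but referencing a capture bails out with the note above until capture evaluation is implemented. A sketch, assuming -std=c++1z with clang's in-progress constexpr lambda support:

constexpr int noCapture() {
  auto l = [] { return 4; };
  return l(); // evaluates fine
}
static_assert(noCapture() == 4, "");

constexpr int withCapture(int n) {
  auto l = [n] { return n; };
  return l();
}
// static_assert(withCapture(5) == 5, ""); // rejected for now:
//                                         // "captures not currently allowed"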
@@ -2771,6 +2928,9 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
} else {
Info.CCEDiag(E);
}
+ } else if (BaseType.isConstQualified() && VD->hasDefinition(Info.Ctx)) {
+ Info.CCEDiag(E, diag::note_constexpr_ltor_non_constexpr) << VD;
+ // Keep evaluating to see what we can do.
} else {
// FIXME: Allow folding of values of any literal type in all languages.
if (Info.checkingPotentialConstantExpression() &&
@@ -2892,7 +3052,6 @@ static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
// In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
// initializer until now for such expressions. Such an expression can't be
// an ICE in C, so this only matters for fold.
- assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
if (Type.isVolatileQualified()) {
Info.FFDiag(Conv);
return false;
@@ -3385,38 +3544,51 @@ enum EvalStmtResult {
};
}
-static bool EvaluateDecl(EvalInfo &Info, const Decl *D) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- // We don't need to evaluate the initializer for a static local.
- if (!VD->hasLocalStorage())
- return true;
+static bool EvaluateVarDecl(EvalInfo &Info, const VarDecl *VD) {
+ // We don't need to evaluate the initializer for a static local.
+ if (!VD->hasLocalStorage())
+ return true;
- LValue Result;
- Result.set(VD, Info.CurrentCall->Index);
- APValue &Val = Info.CurrentCall->createTemporary(VD, true);
+ LValue Result;
+ Result.set(VD, Info.CurrentCall->Index);
+ APValue &Val = Info.CurrentCall->createTemporary(VD, true);
- const Expr *InitE = VD->getInit();
- if (!InitE) {
- Info.FFDiag(D->getLocStart(), diag::note_constexpr_uninitialized)
- << false << VD->getType();
- Val = APValue();
- return false;
- }
+ const Expr *InitE = VD->getInit();
+ if (!InitE) {
+ Info.FFDiag(VD->getLocStart(), diag::note_constexpr_uninitialized)
+ << false << VD->getType();
+ Val = APValue();
+ return false;
+ }
- if (InitE->isValueDependent())
- return false;
+ if (InitE->isValueDependent())
+ return false;
- if (!EvaluateInPlace(Val, Info, Result, InitE)) {
- // Wipe out any partially-computed value, to allow tracking that this
- // evaluation failed.
- Val = APValue();
- return false;
- }
+ if (!EvaluateInPlace(Val, Info, Result, InitE)) {
+ // Wipe out any partially-computed value, to allow tracking that this
+ // evaluation failed.
+ Val = APValue();
+ return false;
}
return true;
}
+static bool EvaluateDecl(EvalInfo &Info, const Decl *D) {
+ bool OK = true;
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ OK &= EvaluateVarDecl(Info, VD);
+
+ if (const DecompositionDecl *DD = dyn_cast<DecompositionDecl>(D))
+ for (auto *BD : DD->bindings())
+ if (auto *VD = BD->getHoldingVar())
+ OK &= EvaluateDecl(Info, VD);
+
+ return OK;
+}
+
/// Evaluate a condition (either a variable declaration or an expression).
static bool EvaluateCond(EvalInfo &Info, const VarDecl *CondDecl,
const Expr *Cond, bool &Result) {
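
EvaluateDecl now also walks DecompositionDecls so that structured bindings, including the hidden holding variables created for tuple-like bindings, participate in constant evaluation. A sketch of what this enables, assuming -std=c++1z:

#include <utility>

constexpr int sum() {
  auto [a, b] = std::pair<int, int>(2, 3); // DecompositionDecl + bindings
  return a + b;
}
static_assert(sum() == 5, "");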
@@ -4389,8 +4561,11 @@ public:
}
// Don't call function pointers which have been cast to some other type.
- if (!Info.Ctx.hasSameType(CalleeType->getPointeeType(), FD->getType()))
+ // Per DR (no number yet), the caller and callee can differ in noexcept.
+ if (!Info.Ctx.hasSameFunctionTypeIgnoringExceptionSpec(
+ CalleeType->getPointeeType(), FD->getType())) {
return Error(E);
+ }
} else
return Error(E);
@@ -4683,7 +4858,7 @@ public:
// * VarDecl
// * FunctionDecl
// - Literals
-// * CompoundLiteralExpr in C
+// * CompoundLiteralExpr in C (and in global scope in C++)
// * StringLiteral
// * CXXTypeidExpr
// * PredefinedExpr
@@ -4770,13 +4945,26 @@ bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
return Success(FD);
if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
return VisitVarDecl(E, VD);
+ if (const BindingDecl *BD = dyn_cast<BindingDecl>(E->getDecl()))
+ return Visit(BD->getBinding());
return Error(E);
}
+
bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
CallStackFrame *Frame = nullptr;
- if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1)
- Frame = Info.CurrentCall;
+ if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1) {
+ // Only if a local variable was declared in the function currently being
+ // evaluated, do we expect to be able to find its value in the current
+ // frame. (Otherwise it was likely declared in an enclosing context and
+ // could either have a valid evaluatable value (for e.g. a constexpr
+ // variable) or be ill-formed (and trigger an appropriate evaluation
+ // diagnostic)).
+ if (Info.CurrentCall->Callee &&
+ Info.CurrentCall->Callee->Equals(VD->getDeclContext())) {
+ Frame = Info.CurrentCall;
+ }
+ }
if (!VD->getType()->isReferenceType()) {
if (Frame) {
@@ -4865,7 +5053,8 @@ bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
bool
LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
- assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ assert((!Info.getLangOpts().CPlusPlus || E->isFileScope()) &&
+ "lvalue compound literal in c++?");
// Defer visiting the literal until the lvalue-to-rvalue conversion. We can
// only see this when folding in C, so there's no standard to follow here.
return Success(E);
@@ -5000,6 +5189,105 @@ bool LValueExprEvaluator::VisitBinAssign(const BinaryOperator *E) {
// Pointer Evaluation
//===----------------------------------------------------------------------===//
+/// \brief Attempts to compute the number of bytes available at the pointer
+/// returned by a function with the alloc_size attribute. Returns true if we
+/// were successful. Places an unsigned number into `Result`.
+///
+/// This expects the given CallExpr to be a call to a function with an
+/// alloc_size attribute.
+static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
+ const CallExpr *Call,
+ llvm::APInt &Result) {
+ const AllocSizeAttr *AllocSize = getAllocSizeAttr(Call);
+
+ // alloc_size args are 1-indexed, 0 means not present.
+ assert(AllocSize && AllocSize->getElemSizeParam() != 0);
+ unsigned SizeArgNo = AllocSize->getElemSizeParam() - 1;
+ unsigned BitsInSizeT = Ctx.getTypeSize(Ctx.getSizeType());
+ if (Call->getNumArgs() <= SizeArgNo)
+ return false;
+
+ auto EvaluateAsSizeT = [&](const Expr *E, APSInt &Into) {
+ if (!E->EvaluateAsInt(Into, Ctx, Expr::SE_AllowSideEffects))
+ return false;
+ if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
+ return false;
+ Into = Into.zextOrSelf(BitsInSizeT);
+ return true;
+ };
+
+ APSInt SizeOfElem;
+ if (!EvaluateAsSizeT(Call->getArg(SizeArgNo), SizeOfElem))
+ return false;
+
+ if (!AllocSize->getNumElemsParam()) {
+ Result = std::move(SizeOfElem);
+ return true;
+ }
+
+ APSInt NumberOfElems;
+ // Argument numbers start at 1
+ unsigned NumArgNo = AllocSize->getNumElemsParam() - 1;
+ if (!EvaluateAsSizeT(Call->getArg(NumArgNo), NumberOfElems))
+ return false;
+
+ bool Overflow;
+ llvm::APInt BytesAvailable = SizeOfElem.umul_ov(NumberOfElems, Overflow);
+ if (Overflow)
+ return false;
+
+ Result = std::move(BytesAvailable);
+ return true;
+}
+
+/// \brief Convenience function. LVal's base must be a call to an alloc_size
+/// function.
+static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
+ const LValue &LVal,
+ llvm::APInt &Result) {
+ assert(isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
+ "Can't get the size of a non alloc_size function");
+ const auto *Base = LVal.getLValueBase().get<const Expr *>();
+ const CallExpr *CE = tryUnwrapAllocSizeCall(Base);
+ return getBytesReturnedByAllocSizeCall(Ctx, CE, Result);
+}
+
+/// \brief Attempts to evaluate the given LValueBase as the result of a call to
+/// a function with the alloc_size attribute. If it was possible to do so, this
+/// function will return true, make Result's Base point to said function call,
+/// and mark Result's Base as invalid.
+static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
+ LValue &Result) {
+ if (!Info.allowInvalidBaseExpr() || Base.isNull())
+ return false;
+
+ // Because we do no form of static analysis, we only support const variables.
+ //
+ // Additionally, we can't support parameters, nor can we support static
+ // variables (in the latter case, use-before-assign isn't UB; in the former,
+ // we have no clue what they'll be assigned to).
+ const auto *VD =
+ dyn_cast_or_null<VarDecl>(Base.dyn_cast<const ValueDecl *>());
+ if (!VD || !VD->isLocalVarDecl() || !VD->getType().isConstQualified())
+ return false;
+
+ const Expr *Init = VD->getAnyInitializer();
+ if (!Init)
+ return false;
+
+ const Expr *E = Init->IgnoreParens();
+ if (!tryUnwrapAllocSizeCall(E))
+ return false;
+
+ // Store E instead of E unwrapped so that the type of the LValue's base is
+ // what the user wanted.
+ Result.setInvalid(E);
+
+ QualType Pointee = E->getType()->castAs<PointerType>()->getPointeeType();
+ Result.addUnsizedArray(Info, Pointee);
+ return true;
+}
+
namespace {
class PointerExprEvaluator
: public ExprEvaluatorBase<PointerExprEvaluator> {
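
The pattern evaluateLValueAsAllocSize recovers, shown with a hypothetical allocator name: a const local pointer initialized from an alloc_size call keeps enough information for __builtin_object_size to fold, even though the call itself is not a constant expression:

void *my_malloc(unsigned long n) __attribute__((alloc_size(1)));

unsigned long bufferSize(void) {
  void *const p = my_malloc(64);      // const local, alloc_size base
  return __builtin_object_size(p, 0); // folds to 64 with this patch
}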
@@ -5009,6 +5297,8 @@ class PointerExprEvaluator
Result.set(E);
return true;
}
+
+ bool visitNonBuiltinCallExpr(const CallExpr *E);
public:
PointerExprEvaluator(EvalInfo &info, LValue &Result)
@@ -5019,7 +5309,9 @@ public:
return true;
}
bool ZeroInitialization(const Expr *E) {
- return Success((Expr*)nullptr);
+ auto Offset = Info.Ctx.getTargetNullPointerValue(E->getType());
+ Result.set((Expr*)nullptr, 0, false, true, Offset);
+ return true;
}
bool VisitBinaryOperator(const BinaryOperator *E);
@@ -5032,6 +5324,7 @@ public:
bool VisitAddrLabelExpr(const AddrLabelExpr *E)
{ return Success(E); }
bool VisitCallExpr(const CallExpr *E);
+ bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
bool VisitBlockExpr(const BlockExpr *E) {
if (!E->getBlockDecl()->hasCaptures())
return Success(E);
@@ -5117,6 +5410,8 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
else
CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
}
+ if (E->getCastKind() == CK_AddressSpaceConversion && Result.IsNullPtr)
+ ZeroInitialization(E);
return true;
case CK_DerivedToBase:
@@ -5158,6 +5453,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
Result.Offset = CharUnits::fromQuantity(N);
Result.CallIndex = 0;
Result.Designator.setInvalid();
+ Result.IsNullPtr = false;
return true;
} else {
// Cast is of an lvalue, no need to change value.
@@ -5185,6 +5481,19 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
case CK_FunctionToPointerDecay:
return EvaluateLValue(SubExpr, Result, Info);
+
+ case CK_LValueToRValue: {
+ LValue LVal;
+ if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
+ return false;
+
+ APValue RVal;
+ // Note, we use the subexpression's type in order to retain cv-qualifiers.
+ if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
+ LVal, RVal))
+ return evaluateLValueAsAllocSize(Info, LVal.Base, Result);
+ return Success(RVal, E);
+ }
}
return ExprEvaluatorBaseTy::VisitCastExpr(E);
@@ -5222,11 +5531,33 @@ static CharUnits GetAlignOfExpr(EvalInfo &Info, const Expr *E) {
return GetAlignOfType(Info, E->getType());
}
+// To be clear: this happily visits unsupported builtins. Better name welcomed.
+bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
+ if (ExprEvaluatorBaseTy::VisitCallExpr(E))
+ return true;
+
+ if (!(Info.allowInvalidBaseExpr() && getAllocSizeAttr(E)))
+ return false;
+
+ Result.setInvalid(E);
+ QualType PointeeTy = E->getType()->castAs<PointerType>()->getPointeeType();
+ Result.addUnsizedArray(Info, PointeeTy);
+ return true;
+}
+
bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (IsStringLiteralCall(E))
return Success(E);
- switch (E->getBuiltinCallee()) {
+ if (unsigned BuiltinOp = E->getBuiltinCallee())
+ return VisitBuiltinCallExpr(E, BuiltinOp);
+
+ return visitNonBuiltinCallExpr(E);
+}
+
+bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
+ unsigned BuiltinOp) {
+ switch (BuiltinOp) {
case Builtin::BI__builtin_addressof:
return EvaluateLValue(E->getArg(0), Result, Info);
case Builtin::BI__builtin_assume_aligned: {
@@ -5264,8 +5595,8 @@ bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
if (BaseAlignment < Align) {
Result.Designator.setInvalid();
- // FIXME: Quantities here cast to integers because the plural modifier
- // does not work on APSInts yet.
+ // FIXME: Quantities here cast to integers because the plural modifier
+ // does not work on APSInts yet.
CCEDiag(E->getArg(0),
diag::note_constexpr_baa_insufficient_alignment) << 0
<< (int) BaseAlignment.getQuantity()
@@ -5294,8 +5625,93 @@ bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
return true;
}
+
+ case Builtin::BIstrchr:
+ case Builtin::BIwcschr:
+ case Builtin::BImemchr:
+ case Builtin::BIwmemchr:
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.CCEDiag(E, diag::note_constexpr_invalid_function)
+ << /*isConstexpr*/0 << /*isConstructor*/0
+ << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ // Fall through.
+ case Builtin::BI__builtin_strchr:
+ case Builtin::BI__builtin_wcschr:
+ case Builtin::BI__builtin_memchr:
+ case Builtin::BI__builtin_wmemchr: {
+ if (!Visit(E->getArg(0)))
+ return false;
+ APSInt Desired;
+ if (!EvaluateInteger(E->getArg(1), Desired, Info))
+ return false;
+ uint64_t MaxLength = uint64_t(-1);
+ if (BuiltinOp != Builtin::BIstrchr &&
+ BuiltinOp != Builtin::BIwcschr &&
+ BuiltinOp != Builtin::BI__builtin_strchr &&
+ BuiltinOp != Builtin::BI__builtin_wcschr) {
+ APSInt N;
+ if (!EvaluateInteger(E->getArg(2), N, Info))
+ return false;
+ MaxLength = N.getExtValue();
+ }
+
+ QualType CharTy = E->getArg(0)->getType()->getPointeeType();
+
+ // Figure out what value we're actually looking for (after converting to
+ // the corresponding unsigned type if necessary).
+ uint64_t DesiredVal;
+ bool StopAtNull = false;
+ switch (BuiltinOp) {
+ case Builtin::BIstrchr:
+ case Builtin::BI__builtin_strchr:
+ // strchr compares directly to the passed integer, and therefore
+ // always fails if given an int that is not a char.
+ if (!APSInt::isSameValue(HandleIntToIntCast(Info, E, CharTy,
+ E->getArg(1)->getType(),
+ Desired),
+ Desired))
+ return ZeroInitialization(E);
+ StopAtNull = true;
+ // Fall through.
+ case Builtin::BImemchr:
+ case Builtin::BI__builtin_memchr:
+ // memchr compares by converting both sides to unsigned char. That's also
+ // correct for strchr if we get this far (to cope with plain char being
+ // unsigned in the strchr case).
+ DesiredVal = Desired.trunc(Info.Ctx.getCharWidth()).getZExtValue();
+ break;
+
+ case Builtin::BIwcschr:
+ case Builtin::BI__builtin_wcschr:
+ StopAtNull = true;
+ // Fall through.
+ case Builtin::BIwmemchr:
+ case Builtin::BI__builtin_wmemchr:
+ // wcschr and wmemchr are given a wchar_t to look for. Just use it.
+ DesiredVal = Desired.getZExtValue();
+ break;
+ }
+
+ for (; MaxLength; --MaxLength) {
+ APValue Char;
+ if (!handleLValueToRValueConversion(Info, E, CharTy, Result, Char) ||
+ !Char.isInt())
+ return false;
+ if (Char.getInt().getZExtValue() == DesiredVal)
+ return true;
+ if (StopAtNull && !Char.getInt())
+ break;
+ if (!HandleLValueArrayAdjustment(Info, E, Result, CharTy, 1))
+ return false;
+ }
+ // Not found: return nullptr.
+ return ZeroInitialization(E);
+ }
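+
+ // The char-searching cases above let us fold, for example,
+ //   __builtin_strchr("hello", 'l')     -> &"hello"[2]
+ //   __builtin_memchr("ab\0cd", 'c', 5) -> &"ab\0cd"[3]
+ // (a sketch; the evaluator actually produces an LValue designating the
+ // matching element).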
+
default:
- return ExprEvaluatorBaseTy::VisitCallExpr(E);
+ return visitNonBuiltinCallExpr(E);
}
}
@@ -5535,6 +5951,9 @@ bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
}
bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ if (E->isTransparent())
+ return Visit(E->getInit(0));
+
const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
if (RD->isInvalidDecl()) return false;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
@@ -5890,7 +6309,7 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
if (EltTy->isRealFloatingType()) {
const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
unsigned FloatEltSize = EltSize;
- if (&Sem == &APFloat::x87DoubleExtended)
+ if (&Sem == &APFloat::x87DoubleExtended())
FloatEltSize = 80;
for (unsigned i = 0; i < NElts; i++) {
llvm::APInt Elt;
@@ -6030,6 +6449,7 @@ namespace {
return handleCallExpr(E, Result, &This);
}
bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E);
bool VisitCXXConstructExpr(const CXXConstructExpr *E,
const LValue &Subobject,
@@ -6112,6 +6532,35 @@ bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
FillerExpr) && Success;
}
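+
+// An ArrayInitLoopExpr represents the implicit per-element initialization of
+// an array, e.g. when a lambda captures an array by value. We evaluate its
+// common (source) expression once, then evaluate the element initializer once
+// per element, with ArrayInitIndex supplying the current index.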
+bool ArrayExprEvaluator::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E) {
+ if (E->getCommonExpr() &&
+ !Evaluate(Info.CurrentCall->createTemporary(E->getCommonExpr(), false),
+ Info, E->getCommonExpr()->getSourceExpr()))
+ return false;
+
+ auto *CAT = cast<ConstantArrayType>(E->getType()->castAsArrayTypeUnsafe());
+
+ uint64_t Elements = CAT->getSize().getZExtValue();
+ Result = APValue(APValue::UninitArray(), Elements, Elements);
+
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+
+ bool Success = true;
+ for (EvalInfo::ArrayInitLoopIndex Index(Info); Index != Elements; ++Index) {
+ if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
+ Info, Subobject, E->getSubExpr()) ||
+ !HandleLValueArrayAdjustment(Info, E, Subobject,
+ CAT->getElementType(), 1)) {
+ if (!Info.noteFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
+}
+
bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
return VisitCXXConstructExpr(E, This, &Result, E->getType());
}
@@ -6252,6 +6701,7 @@ public:
}
bool VisitCallExpr(const CallExpr *E);
+ bool VisitBuiltinCallExpr(const CallExpr *E, unsigned BuiltinOp);
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitOffsetOfExpr(const OffsetOfExpr *E);
bool VisitUnaryOperator(const UnaryOperator *E);
@@ -6266,6 +6716,16 @@ public:
bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
return Success(E->getValue(), E);
}
+
+ bool VisitArrayInitIndexExpr(const ArrayInitIndexExpr *E) {
+ if (Info.ArrayInitIndex == uint64_t(-1)) {
+ // We were asked to evaluate this subexpression independent of the
+ // enclosing ArrayInitLoopExpr. We can't do that.
+ Info.FFDiag(E);
+ return false;
+ }
+ return Success(Info.ArrayInitIndex, E);
+ }
// Note, GNU defines __null as an integer, not a pointer.
bool VisitGNUNullExpr(const GNUNullExpr *E) {
@@ -6290,8 +6750,6 @@ public:
bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
-private:
- bool TryEvaluateBuiltinObjectSize(const CallExpr *E, unsigned Type);
// FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
@@ -6563,7 +7021,7 @@ static QualType getObjectType(APValue::LValueBase B) {
}
/// A more selective version of E->IgnoreParenCasts for
-/// TryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
+/// tryEvaluateBuiltinObjectSize. This ignores some casts/parens that serve only
/// to change the type of E.
/// Ex. For E = `(short*)((char*)(&foo))`, returns `&foo`
///
@@ -6630,82 +7088,191 @@ static bool isDesignatorAtObjectEnd(const ASTContext &Ctx, const LValue &LVal) {
}
}
+ unsigned I = 0;
QualType BaseType = getType(Base);
- for (int I = 0, E = LVal.Designator.Entries.size(); I != E; ++I) {
+ if (LVal.Designator.FirstEntryIsAnUnsizedArray) {
+ assert(isBaseAnAllocSizeCall(Base) &&
+ "Unsized array in non-alloc_size call?");
+ // If this is an alloc_size base, we should ignore the initial array index
+ ++I;
+ BaseType = BaseType->castAs<PointerType>()->getPointeeType();
+ }
+
+ for (unsigned E = LVal.Designator.Entries.size(); I != E; ++I) {
+ const auto &Entry = LVal.Designator.Entries[I];
if (BaseType->isArrayType()) {
// Because __builtin_object_size treats arrays as objects, we can ignore
// the index iff this is the last array in the Designator.
if (I + 1 == E)
return true;
- auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
- uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+ const auto *CAT = cast<ConstantArrayType>(Ctx.getAsArrayType(BaseType));
+ uint64_t Index = Entry.ArrayIndex;
if (Index + 1 != CAT->getSize())
return false;
BaseType = CAT->getElementType();
} else if (BaseType->isAnyComplexType()) {
- auto *CT = BaseType->castAs<ComplexType>();
- uint64_t Index = LVal.Designator.Entries[I].ArrayIndex;
+ const auto *CT = BaseType->castAs<ComplexType>();
+ uint64_t Index = Entry.ArrayIndex;
if (Index != 1)
return false;
BaseType = CT->getElementType();
- } else if (auto *FD = getAsField(LVal.Designator.Entries[I])) {
+ } else if (auto *FD = getAsField(Entry)) {
bool Invalid;
if (!IsLastOrInvalidFieldDecl(FD, Invalid))
return Invalid;
BaseType = FD->getType();
} else {
- assert(getAsBaseClass(LVal.Designator.Entries[I]) != nullptr &&
- "Expecting cast to a base class");
+ assert(getAsBaseClass(Entry) && "Expecting cast to a base class");
return false;
}
}
return true;
}
-/// Tests to see if the LValue has a designator (that isn't necessarily valid).
+/// Tests whether the LValue refers to the complete object, rather than to a
+/// user-specified subobject designator (which isn't necessarily valid). Note
+/// that this always returns 'true' if the LValue has an unsized array as its
+/// first designator entry, because there's currently no way to tell whether
+/// the user typed *foo or foo[0].
static bool refersToCompleteObject(const LValue &LVal) {
- if (LVal.Designator.Invalid || !LVal.Designator.Entries.empty())
+ if (LVal.Designator.Invalid)
return false;
+ if (!LVal.Designator.Entries.empty())
+ return LVal.Designator.isMostDerivedAnUnsizedArray();
+
if (!LVal.InvalidBase)
return true;
- auto *E = LVal.Base.dyn_cast<const Expr *>();
- (void)E;
- assert(E != nullptr && isa<MemberExpr>(E));
- return false;
+ // If `E` is a MemberExpr, then the first part of the designator is hiding in
+ // the LValueBase.
+ const auto *E = LVal.Base.dyn_cast<const Expr *>();
+ return !E || !isa<MemberExpr>(E);
+}
+
+/// Attempts to detect a user writing into a piece of memory whose size cannot
+/// be determined from type information alone.
+static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal) {
+ const SubobjectDesignator &Designator = LVal.Designator;
+ // Notes:
+ // - Users can only write off of the end when we have an invalid base. Invalid
+ // bases imply we don't know where the memory came from.
+ // - We used to be a bit more aggressive here; we'd only be conservative if
+ // the array at the end was flexible, or if it had 0 or 1 elements. This
+ // broke some common standard library extensions (PR30346), but was
+ // otherwise seemingly fine. It may be useful to reintroduce this behavior
+ // with some sort of whitelist. OTOH, it seems that GCC is always
+ // conservative with the last element in structs (if it's an array), so our
+ // current behavior is more compatible than a whitelisting approach would
+ // be.
+ return LVal.InvalidBase &&
+ Designator.Entries.size() == Designator.MostDerivedPathLength &&
+ Designator.MostDerivedIsArrayElement &&
+ isDesignatorAtObjectEnd(Ctx, LVal);
+}
+
+/// Converts the given APInt to CharUnits, assuming the APInt is unsigned.
+/// Fails if the conversion would cause loss of precision.
+static bool convertUnsignedAPIntToCharUnits(const llvm::APInt &Int,
+ CharUnits &Result) {
+ auto CharUnitsMax = std::numeric_limits<CharUnits::QuantityType>::max();
+ if (Int.ugt(CharUnitsMax))
+ return false;
+ Result = CharUnits::fromQuantity(Int.getZExtValue());
+ return true;
}
-/// Tries to evaluate the __builtin_object_size for @p E. If successful, returns
-/// true and stores the result in @p Size.
+/// Helper for tryEvaluateBuiltinObjectSize -- Given an LValue, this will
+/// determine how many bytes exist from the beginning of the object to either
+/// the end of the current subobject, or the end of the object itself, depending
+/// on what the LValue looks like + the value of Type.
///
-/// If @p WasError is non-null, this will report whether the failure to evaluate
-/// is to be treated as an Error in IntExprEvaluator.
-static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
- EvalInfo &Info, uint64_t &Size,
- bool *WasError = nullptr) {
- if (WasError != nullptr)
- *WasError = false;
-
- auto Error = [&](const Expr *E) {
- if (WasError != nullptr)
- *WasError = true;
+/// If this returns false, the value of EndOffset is undefined.
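+///
+/// For example (following the GCC __builtin_object_size documentation), given
+///   struct V { char buf1[10]; int b; char buf2[10]; } var;
+/// a pointer to &var.buf1[3] has sizeof(var) - 3 bytes to the end of the whole
+/// object (Type=0/2), but only sizeof(var.buf1) - 3 == 7 bytes to the end of
+/// the enclosing subobject (Type=1/3).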
+static bool determineEndOffset(EvalInfo &Info, SourceLocation ExprLoc,
+ unsigned Type, const LValue &LVal,
+ CharUnits &EndOffset) {
+ bool DetermineForCompleteObject = refersToCompleteObject(LVal);
+
+ // We want to evaluate the size of the entire object. This is a valid fallback
+ // for when Type=1 and the designator is invalid, because we're asked for an
+ // upper-bound.
+ if (!(Type & 1) || LVal.Designator.Invalid || DetermineForCompleteObject) {
+ // Type=3 wants a lower bound, so we can't fall back to this.
+ if (Type == 3 && !DetermineForCompleteObject)
+ return false;
+
+ llvm::APInt APEndOffset;
+ if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
+ getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
+ return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
+
+ if (LVal.InvalidBase)
+ return false;
+
+ QualType BaseTy = getObjectType(LVal.getLValueBase());
+ return !BaseTy.isNull() && HandleSizeof(Info, ExprLoc, BaseTy, EndOffset);
+ }
+
+ // We want to evaluate the size of a subobject.
+ const SubobjectDesignator &Designator = LVal.Designator;
+
+ // The following is a moderately common idiom in C:
+ //
+ // struct Foo { int a; char c[1]; };
+ // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
+ // strcpy(&F->c[0], Bar);
+ //
+ // In order to not break too much legacy code, we need to support it.
+ if (isUserWritingOffTheEnd(Info.Ctx, LVal)) {
+ // If we can resolve this to an alloc_size call, we can hand that back,
+ // because we know for certain how many bytes there are to write to.
+ llvm::APInt APEndOffset;
+ if (isBaseAnAllocSizeCall(LVal.getLValueBase()) &&
+ getBytesReturnedByAllocSizeCall(Info.Ctx, LVal, APEndOffset))
+ return convertUnsignedAPIntToCharUnits(APEndOffset, EndOffset);
+
+ // If we cannot determine the size of the initial allocation, then we can't
+ // give an accurate upper-bound. However, we are still able to give
+ // conservative lower-bounds for Type=3.
+ if (Type == 1)
+ return false;
+ }
+
+ CharUnits BytesPerElem;
+ if (!HandleSizeof(Info, ExprLoc, Designator.MostDerivedType, BytesPerElem))
return false;
- };
- auto Success = [&](uint64_t S, const Expr *E) {
- Size = S;
- return true;
- };
+ // According to the GCC documentation, we want the size of the subobject
+ // denoted by the pointer. But that's not quite right -- what we actually
+ // want is the size of the immediately-enclosing array, if there is one.
+ int64_t ElemsRemaining;
+ if (Designator.MostDerivedIsArrayElement &&
+ Designator.Entries.size() == Designator.MostDerivedPathLength) {
+ uint64_t ArraySize = Designator.getMostDerivedArraySize();
+ uint64_t ArrayIndex = Designator.Entries.back().ArrayIndex;
+ ElemsRemaining = ArraySize <= ArrayIndex ? 0 : ArraySize - ArrayIndex;
+ } else {
+ ElemsRemaining = Designator.isOnePastTheEnd() ? 0 : 1;
+ }
+ EndOffset = LVal.getLValueOffset() + BytesPerElem * ElemsRemaining;
+ return true;
+}
+
+/// \brief Tries to evaluate the __builtin_object_size for @p E. If successful,
+/// returns true and stores the result in @p Size.
+static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
+ EvalInfo &Info, uint64_t &Size) {
// Determine the denoted object.
- LValue Base;
+ LValue LVal;
{
// The operand of __builtin_object_size is never evaluated for side-effects.
// If there are any, but we can determine the pointed-to object anyway, then
// ignore the side-effects.
SpeculativeEvaluationRAII SpeculativeEval(Info);
- FoldOffsetRAII Fold(Info, Type & 1);
+ FoldOffsetRAII Fold(Info);
if (E->isGLValue()) {
// It's possible for us to be given GLValues if we're called via
@@ -6713,118 +7280,40 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
APValue RVal;
if (!EvaluateAsRValue(Info, E, RVal))
return false;
- Base.setFrom(Info.Ctx, RVal);
- } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), Base, Info))
+ LVal.setFrom(Info.Ctx, RVal);
+ } else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info))
return false;
}
- CharUnits BaseOffset = Base.getLValueOffset();
// If we point to before the start of the object, there are no accessible
// bytes.
- if (BaseOffset.isNegative())
- return Success(0, E);
-
- // In the case where we're not dealing with a subobject, we discard the
- // subobject bit.
- bool SubobjectOnly = (Type & 1) != 0 && !refersToCompleteObject(Base);
-
- // If Type & 1 is 0, we need to be able to statically guarantee that the bytes
- // exist. If we can't verify the base, then we can't do that.
- //
- // As a special case, we produce a valid object size for an unknown object
- // with a known designator if Type & 1 is 1. For instance:
- //
- // extern struct X { char buff[32]; int a, b, c; } *p;
- // int a = __builtin_object_size(p->buff + 4, 3); // returns 28
- // int b = __builtin_object_size(p->buff + 4, 2); // returns 0, not 40
- //
- // This matches GCC's behavior.
- if (Base.InvalidBase && !SubobjectOnly)
- return Error(E);
-
- // If we're not examining only the subobject, then we reset to a complete
- // object designator
- //
- // If Type is 1 and we've lost track of the subobject, just find the complete
- // object instead. (If Type is 3, that's not correct behavior and we should
- // return 0 instead.)
- LValue End = Base;
- if (!SubobjectOnly || (End.Designator.Invalid && Type == 1)) {
- QualType T = getObjectType(End.getLValueBase());
- if (T.isNull())
- End.Designator.setInvalid();
- else {
- End.Designator = SubobjectDesignator(T);
- End.Offset = CharUnits::Zero();
- }
+ if (LVal.getLValueOffset().isNegative()) {
+ Size = 0;
+ return true;
}
- // If it is not possible to determine which objects ptr points to at compile
- // time, __builtin_object_size should return (size_t) -1 for type 0 or 1
- // and (size_t) 0 for type 2 or 3.
- if (End.Designator.Invalid)
- return false;
-
- // According to the GCC documentation, we want the size of the subobject
- // denoted by the pointer. But that's not quite right -- what we actually
- // want is the size of the immediately-enclosing array, if there is one.
- int64_t AmountToAdd = 1;
- if (End.Designator.MostDerivedIsArrayElement &&
- End.Designator.Entries.size() == End.Designator.MostDerivedPathLength) {
- // We got a pointer to an array. Step to its end.
- AmountToAdd = End.Designator.MostDerivedArraySize -
- End.Designator.Entries.back().ArrayIndex;
- } else if (End.Designator.isOnePastTheEnd()) {
- // We're already pointing at the end of the object.
- AmountToAdd = 0;
- }
-
- QualType PointeeType = End.Designator.MostDerivedType;
- assert(!PointeeType.isNull());
- if (PointeeType->isIncompleteType() || PointeeType->isFunctionType())
- return Error(E);
-
- if (!HandleLValueArrayAdjustment(Info, E, End, End.Designator.MostDerivedType,
- AmountToAdd))
- return false;
-
- auto EndOffset = End.getLValueOffset();
-
- // The following is a moderately common idiom in C:
- //
- // struct Foo { int a; char c[1]; };
- // struct Foo *F = (struct Foo *)malloc(sizeof(struct Foo) + strlen(Bar));
- // strcpy(&F->c[0], Bar);
- //
- // So, if we see that we're examining a 1-length (or 0-length) array at the
- // end of a struct with an unknown base, we give up instead of breaking code
- // that behaves this way. Note that we only do this when Type=1, because
- // Type=3 is a lower bound, so answering conservatively is fine.
- if (End.InvalidBase && SubobjectOnly && Type == 1 &&
- End.Designator.Entries.size() == End.Designator.MostDerivedPathLength &&
- End.Designator.MostDerivedIsArrayElement &&
- End.Designator.MostDerivedArraySize < 2 &&
- isDesignatorAtObjectEnd(Info.Ctx, End))
+ CharUnits EndOffset;
+ if (!determineEndOffset(Info, E->getExprLoc(), Type, LVal, EndOffset))
return false;
- if (BaseOffset > EndOffset)
- return Success(0, E);
-
- return Success((EndOffset - BaseOffset).getQuantity(), E);
+ // If we've fallen outside of the end offset, just pretend there's nothing to
+ // write to/read from.
+ if (EndOffset <= LVal.getLValueOffset())
+ Size = 0;
+ else
+ Size = (EndOffset - LVal.getLValueOffset()).getQuantity();
+ return true;
}
-bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E,
- unsigned Type) {
- uint64_t Size;
- bool WasError;
- if (::tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size, &WasError))
- return Success(Size, E);
- if (WasError)
- return Error(E);
- return false;
+bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ if (unsigned BuiltinOp = E->getBuiltinCallee())
+ return VisitBuiltinCallExpr(E, BuiltinOp);
+
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
}
-bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
+bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
+ unsigned BuiltinOp) {
switch (BuiltinOp) {
default:
return ExprEvaluatorBaseTy::VisitCallExpr(E);
@@ -6835,8 +7324,9 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
assert(Type <= 3 && "unexpected type");
- if (TryEvaluateBuiltinObjectSize(E, Type))
- return true;
+ uint64_t Size;
+ if (tryEvaluateBuiltinObjectSize(E->getArg(0), Type, Info, Size))
+ return Success(Size, E);
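+
+ // Evaluation failed. If the argument has side-effects, fold to the
+ // "don't know" value: (size_t)-1 for the maximum modes (Type 0 and 1) and
+ // 0 for the minimum modes (Type 2 and 3).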
if (E->getArg(0)->HasSideEffects(Info.Ctx))
return Success((Type & 2) ? 0 : -1, E);
@@ -6849,7 +7339,7 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
case EvalInfo::EM_ConstantFold:
case EvalInfo::EM_EvaluateForOverflow:
case EvalInfo::EM_IgnoreSideEffects:
- case EvalInfo::EM_DesignatorFold:
+ case EvalInfo::EM_OffsetFold:
// Leave it to IR generation.
return Error(E);
case EvalInfo::EM_ConstantExpressionUnevaluated:
@@ -6857,6 +7347,8 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
// Reduce it to a constant now.
return Success((Type & 2) ? 0 : -1, E);
}
+
+ llvm_unreachable("unexpected EvalMode");
}
case Builtin::BI__builtin_bswap16:
@@ -6990,20 +7482,25 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
}
case Builtin::BIstrlen:
+ case Builtin::BIwcslen:
// A call to strlen or wcslen is not a constant expression.
if (Info.getLangOpts().CPlusPlus11)
Info.CCEDiag(E, diag::note_constexpr_invalid_function)
- << /*isConstexpr*/0 << /*isConstructor*/0 << "'strlen'";
+ << /*isConstexpr*/0 << /*isConstructor*/0
+ << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
else
Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
// Fall through.
- case Builtin::BI__builtin_strlen: {
+ case Builtin::BI__builtin_strlen:
+ case Builtin::BI__builtin_wcslen: {
// As an extension, we support __builtin_strlen() as a constant expression,
// and support folding strlen() to a constant.
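+ // For instance, __builtin_strlen("hello") and __builtin_wcslen(L"hello")
+ // both fold to 5 here.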
LValue String;
if (!EvaluatePointer(E->getArg(0), String, Info))
return false;
+ QualType CharTy = E->getArg(0)->getType()->getPointeeType();
+
// Fast path: if it's a string literal, search the string value.
if (const StringLiteral *S = dyn_cast_or_null<StringLiteral>(
String.getLValueBase().dyn_cast<const Expr *>())) {
@@ -7012,7 +7509,9 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
StringRef Str = S->getBytes();
int64_t Off = String.Offset.getQuantity();
if (Off >= 0 && (uint64_t)Off <= (uint64_t)Str.size() &&
- S->getCharByteWidth() == 1) {
+ S->getCharByteWidth() == 1 &&
+ // FIXME: Add fast-path for wchar_t too.
+ Info.Ctx.hasSameUnqualifiedType(CharTy, Info.Ctx.CharTy)) {
Str = Str.substr(Off);
StringRef::size_type Pos = Str.find(0);
@@ -7026,7 +7525,6 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
}
// Slow path: scan the bytes of the string looking for the terminating 0.
- QualType CharTy = E->getArg(0)->getType()->getPointeeType();
for (uint64_t Strlen = 0; /**/; ++Strlen) {
APValue Char;
if (!handleLValueToRValueConversion(Info, E, CharTy, String, Char) ||
@@ -7039,6 +7537,66 @@ bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
}
}
+ case Builtin::BIstrcmp:
+ case Builtin::BIwcscmp:
+ case Builtin::BIstrncmp:
+ case Builtin::BIwcsncmp:
+ case Builtin::BImemcmp:
+ case Builtin::BIwmemcmp:
+ // A call to any of these comparison functions is not a constant expression.
+ if (Info.getLangOpts().CPlusPlus11)
+ Info.CCEDiag(E, diag::note_constexpr_invalid_function)
+ << /*isConstexpr*/0 << /*isConstructor*/0
+ << (std::string("'") + Info.Ctx.BuiltinInfo.getName(BuiltinOp) + "'");
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ // Fall through.
+ case Builtin::BI__builtin_strcmp:
+ case Builtin::BI__builtin_wcscmp:
+ case Builtin::BI__builtin_strncmp:
+ case Builtin::BI__builtin_wcsncmp:
+ case Builtin::BI__builtin_memcmp:
+ case Builtin::BI__builtin_wmemcmp: {
+ LValue String1, String2;
+ if (!EvaluatePointer(E->getArg(0), String1, Info) ||
+ !EvaluatePointer(E->getArg(1), String2, Info))
+ return false;
+
+ QualType CharTy = E->getArg(0)->getType()->getPointeeType();
+
+ uint64_t MaxLength = uint64_t(-1);
+ if (BuiltinOp != Builtin::BIstrcmp &&
+ BuiltinOp != Builtin::BIwcscmp &&
+ BuiltinOp != Builtin::BI__builtin_strcmp &&
+ BuiltinOp != Builtin::BI__builtin_wcscmp) {
+ APSInt N;
+ if (!EvaluateInteger(E->getArg(2), N, Info))
+ return false;
+ MaxLength = N.getExtValue();
+ }
+ bool StopAtNull = (BuiltinOp != Builtin::BImemcmp &&
+ BuiltinOp != Builtin::BIwmemcmp &&
+ BuiltinOp != Builtin::BI__builtin_memcmp &&
+ BuiltinOp != Builtin::BI__builtin_wmemcmp);
+ for (; MaxLength; --MaxLength) {
+ APValue Char1, Char2;
+ if (!handleLValueToRValueConversion(Info, E, CharTy, String1, Char1) ||
+ !handleLValueToRValueConversion(Info, E, CharTy, String2, Char2) ||
+ !Char1.isInt() || !Char2.isInt())
+ return false;
+ if (Char1.getInt() != Char2.getInt())
+ return Success(Char1.getInt() < Char2.getInt() ? -1 : 1, E);
+ if (StopAtNull && !Char1.getInt())
+ return Success(0, E);
+ assert(!(StopAtNull && !Char2.getInt()));
+ if (!HandleLValueArrayAdjustment(Info, E, String1, CharTy, 1) ||
+ !HandleLValueArrayAdjustment(Info, E, String2, CharTy, 1))
+ return false;
+ }
+ // We hit the strncmp / memcmp limit.
+ return Success(0, E);
+ }
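+
+ // The comparison cases above fold, for example, __builtin_strcmp("ab", "ac")
+ // to -1, and likewise __builtin_memcmp("a\0b", "a\0c", 3), since memcmp does
+ // not stop at the first NUL.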
+
case Builtin::BI__atomic_always_lock_free:
case Builtin::BI__atomic_is_lock_free:
case Builtin::BI__c11_atomic_is_lock_free: {
@@ -7160,9 +7718,7 @@ class DataRecursiveIntBinOpEvaluator {
enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
Job() = default;
- Job(Job &&J)
- : E(J.E), LHSResult(J.LHSResult), Kind(J.Kind),
- SpecEvalRAII(std::move(J.SpecEvalRAII)) {}
+ Job(Job &&) = default;
void startSpeculativeEval(EvalInfo &Info) {
SpecEvalRAII = SpeculativeEvaluationRAII(Info);
@@ -8037,8 +8593,10 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_IntegralComplexToFloatingComplex:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -8113,8 +8671,13 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
return true;
}
- APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset().getQuantity(),
- SrcType);
+ uint64_t V;
+ if (LV.isNullPointer())
+ V = Info.Ctx.getTargetNullPointerValue(SrcType);
+ else
+ V = LV.getLValueOffset().getQuantity();
+
+ APSInt AsInt = Info.Ctx.MakeIntValue(V, SrcType);
return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
}
@@ -8528,8 +9091,10 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
case CK_NonAtomicToAtomic:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
@@ -9341,6 +9906,8 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CompoundLiteralExprClass:
case Expr::ExtVectorElementExprClass:
case Expr::DesignatedInitExprClass:
+ case Expr::ArrayInitLoopExprClass:
+ case Expr::ArrayInitIndexExprClass:
case Expr::NoInitExprClass:
case Expr::DesignatedInitUpdateExprClass:
case Expr::ImplicitValueInitExprClass:
@@ -9877,5 +10444,5 @@ bool Expr::tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx,
Expr::EvalStatus Status;
EvalInfo Info(Ctx, Status, EvalInfo::EM_ConstantFold);
- return ::tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
+ return tryEvaluateBuiltinObjectSize(this, Type, Info, Result);
}
diff --git a/lib/AST/ExprObjC.cpp b/lib/AST/ExprObjC.cpp
index 0936a81a597a..31c1b3f15621 100644
--- a/lib/AST/ExprObjC.cpp
+++ b/lib/AST/ExprObjC.cpp
@@ -278,7 +278,7 @@ ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C, unsigned NumArgs,
unsigned NumStoredSelLocs) {
return (ObjCMessageExpr *)C.Allocate(
totalSizeToAlloc<void *, SourceLocation>(NumArgs + 1, NumStoredSelLocs),
- llvm::AlignOf<ObjCMessageExpr>::Alignment);
+ alignof(ObjCMessageExpr));
}
void ObjCMessageExpr::getSelectorLocs(
diff --git a/lib/AST/ItaniumCXXABI.cpp b/lib/AST/ItaniumCXXABI.cpp
index 8a2cc0fbee42..692a455eafc0 100644
--- a/lib/AST/ItaniumCXXABI.cpp
+++ b/lib/AST/ItaniumCXXABI.cpp
@@ -63,9 +63,10 @@ public:
CallOperator->getType()->getAs<FunctionProtoType>();
ASTContext &Context = CallOperator->getASTContext();
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = Proto->isVariadic();
QualType Key =
- Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(),
- FunctionProtoType::ExtProtoInfo());
+ Context.getFunctionType(Context.VoidTy, Proto->getParamTypes(), EPI);
Key = Context.getCanonicalType(Key);
return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
}
@@ -141,14 +142,6 @@ public:
void addCopyConstructorForExceptionObject(CXXRecordDecl *RD,
CXXConstructorDecl *CD) override {}
- void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx, Expr *DAE) override {}
-
- Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx) override {
- return nullptr;
- }
-
void addTypedefNameForUnnamedTagDecl(TagDecl *TD,
TypedefNameDecl *DD) override {}
@@ -163,8 +156,9 @@ public:
return nullptr;
}
- MangleNumberingContext *createMangleNumberingContext() const override {
- return new ItaniumNumberingContext();
+ std::unique_ptr<MangleNumberingContext>
+ createMangleNumberingContext() const override {
+ return llvm::make_unique<ItaniumNumberingContext>();
}
};
}
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index 694fde317542..ab3e49d903cf 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -405,12 +405,14 @@ public:
CXXNameMangler(CXXNameMangler &Outer, raw_ostream &Out_)
: Context(Outer.Context), Out(Out_), NullOut(false),
Structor(Outer.Structor), StructorType(Outer.StructorType),
- SeqID(Outer.SeqID), AbiTagsRoot(AbiTags) {}
+ SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
+ AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
CXXNameMangler(CXXNameMangler &Outer, llvm::raw_null_ostream &Out_)
: Context(Outer.Context), Out(Out_), NullOut(true),
Structor(Outer.Structor), StructorType(Outer.StructorType),
- SeqID(Outer.SeqID), AbiTagsRoot(AbiTags) {}
+ SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
+ AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
#if MANGLE_CHECKER
~CXXNameMangler() {
@@ -458,11 +460,15 @@ private:
void addSubstitution(QualType T);
void addSubstitution(TemplateName Template);
void addSubstitution(uintptr_t Ptr);
+ // Destructively copy the substitutions from another mangler into this one.
+ void extendSubstitutions(CXXNameMangler* Other);
void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
bool recursive = false);
void mangleUnresolvedName(NestedNameSpecifier *qualifier,
DeclarationName name,
+ const TemplateArgumentLoc *TemplateArgs,
+ unsigned NumTemplateArgs,
unsigned KnownArity = UnknownArity);
void mangleFunctionEncodingBareType(const FunctionDecl *FD);
@@ -487,6 +493,7 @@ private:
void mangleUnscopedTemplateName(TemplateName,
const AbiTagList *AdditionalAbiTags);
void mangleSourceName(const IdentifierInfo *II);
+ void mangleRegCallName(const IdentifierInfo *II);
void mangleSourceNameWithAbiTags(
const NamedDecl *ND, const AbiTagList *AdditionalAbiTags = nullptr);
void mangleLocalName(const Decl *D,
@@ -537,6 +544,8 @@ private:
NestedNameSpecifier *qualifier,
NamedDecl *firstQualifierLookup,
DeclarationName name,
+ const TemplateArgumentLoc *TemplateArgs,
+ unsigned NumTemplateArgs,
unsigned knownArity);
void mangleCastExpression(const Expr *E, StringRef CastEncoding);
void mangleInitListElements(const InitListExpr *InitList);
@@ -593,7 +602,7 @@ bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
return false;
const VarDecl *VD = dyn_cast<VarDecl>(D);
- if (VD) {
+ if (VD && !isa<DecompositionDecl>(D)) {
// C variables are not mangled.
if (VD->isExternC())
return false;
@@ -685,6 +694,10 @@ void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
// Output name with implicit tags and function encoding from temporary buffer.
mangleNameWithAbiTags(FD, &AdditionalAbiTags);
Out << FunctionEncodingStream.str().substr(EncodingPositionStart);
+
+ // Mangling the function encoding may have created new substitutions, so
+ // copy the temporary mangler's substitutions back into the main mangler.
+ extendSubstitutions(&FunctionEncodingMangler);
}
void CXXNameMangler::mangleFunctionEncodingBareType(const FunctionDecl *FD) {
@@ -1151,9 +1164,10 @@ void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
/// Mangle an unresolved-name, which is generally used for names which
/// weren't resolved to specific entities.
-void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
- DeclarationName name,
- unsigned knownArity) {
+void CXXNameMangler::mangleUnresolvedName(
+ NestedNameSpecifier *qualifier, DeclarationName name,
+ const TemplateArgumentLoc *TemplateArgs, unsigned NumTemplateArgs,
+ unsigned knownArity) {
if (qualifier) mangleUnresolvedPrefix(qualifier);
switch (name.getNameKind()) {
// <base-unresolved-name> ::= <simple-id>
@@ -1181,6 +1195,11 @@ void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
case DeclarationName::ObjCZeroArgSelector:
llvm_unreachable("Can't mangle Objective-C selector names here!");
}
+
+ // The <simple-id> and <operator-name> productions end in an optional
+ // <template-args>.
+ if (TemplateArgs)
+ mangleTemplateArgs(TemplateArgs, NumTemplateArgs);
}
void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
@@ -1193,7 +1212,26 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// ::= <source-name>
switch (Name.getNameKind()) {
case DeclarationName::Identifier: {
- if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ const IdentifierInfo *II = Name.getAsIdentifierInfo();
+
+ // We mangle decomposition declarations as the names of their bindings.
+ if (auto *DD = dyn_cast<DecompositionDecl>(ND)) {
+ // FIXME: Non-standard mangling for decomposition declarations:
+ //
+ // <unqualified-name> ::= DC <source-name>* E
+ //
+ // These can never be referenced across translation units, so we do
+ // not need a cross-vendor mangling for anything other than demanglers.
+ // Proposed on cxx-abi-dev on 2016-08-12
+ Out << "DC";
+ for (auto *BD : DD->bindings())
+ mangleSourceName(BD->getDeclName().getAsIdentifierInfo());
+ Out << 'E';
+ writeAbiTags(ND, AdditionalAbiTags);
+ break;
+ }
+
+ if (II) {
// We must avoid conflicts between internally- and externally-
// linked variable and function declaration names in the same TU:
// void test() { extern void foo(); }
@@ -1204,7 +1242,15 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
getEffectiveDeclContext(ND)->isFileContext())
Out << 'L';
- mangleSourceName(II);
+ auto *FD = dyn_cast<FunctionDecl>(ND);
+ bool IsRegCall = FD &&
+ FD->getType()->castAs<FunctionType>()->getCallConv() ==
+ clang::CC_X86RegCall;
+ if (IsRegCall)
+ mangleRegCallName(II);
+ else
+ mangleSourceName(II);
+
writeAbiTags(ND, AdditionalAbiTags);
break;
}
@@ -1378,6 +1424,14 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
}
}
+void CXXNameMangler::mangleRegCallName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> __regcall3__ <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
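+ // For example, a __regcall function named 'foo' is emitted as
+ // "15__regcall3__foo" (12 chars of "__regcall3__" plus 3 of "foo").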
+ Out << II->getLength() + sizeof("__regcall3__") - 1 << "__regcall3__"
+ << II->getName();
+}
+
void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
// <source-name> ::= <positive length number> <identifier>
// <number> ::= [n] <non-negative decimal integer>
@@ -1471,7 +1525,7 @@ void CXXNameMangler::mangleLocalName(const Decl *D,
// numbering will be local to the particular argument in which it appears
// -- other default arguments do not affect its encoding.
const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD);
- if (CXXRD->isLambda()) {
+ if (CXXRD && CXXRD->isLambda()) {
if (const ParmVarDecl *Parm
= dyn_cast_or_null<ParmVarDecl>(CXXRD->getLambdaContextDecl())) {
if (const FunctionDecl *Func
@@ -1820,6 +1874,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
+ case Type::ObjCTypeParam:
case Type::Atomic:
case Type::Pipe:
llvm_unreachable("type is illegal as a nested name specifier");
@@ -2207,6 +2262,22 @@ void CXXNameMangler::mangleType(QualType T) {
// they aren't written.
// - Conversions on non-type template arguments need to be expressed, since
// they can affect the mangling of sizeof/alignof.
+ //
+ // FIXME: This is wrong when mapping to the canonical type for a dependent
+ // type discards instantiation-dependent portions of the type, such as for:
+ //
+ // template<typename T, int N> void f(T (&)[sizeof(N)]);
+ // template<typename T> void f(T() throw(typename T::type)); (pre-C++17)
+ //
+ // It's also wrong in the opposite direction when instantiation-dependent,
+ // canonically-equivalent types differ in some irrelevant portion of inner
+ // type sugar. In such cases, we fail to form correct substitutions, eg:
+ //
+ // template<int N> void f(A<sizeof(N)> *, A<sizeof(N)> (*));
+ //
+ // We should instead canonicalize the non-instantiation-dependent parts,
+ // regardless of whether the type as a whole is dependent or instantiation
+ // dependent.
if (!T->isInstantiationDependentType() || T->isDependentType())
T = T.getCanonicalType();
else {
@@ -2443,6 +2514,7 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_X86Pascal:
case CC_X86_64Win64:
case CC_X86_64SysV:
+ case CC_X86RegCall:
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_IntelOclBicc:
@@ -2509,6 +2581,24 @@ void CXXNameMangler::mangleType(const FunctionProtoType *T) {
// e.g. "const" in "int (A::*)() const".
mangleQualifiers(Qualifiers::fromCVRMask(T->getTypeQuals()));
+ // Mangle instantiation-dependent exception-specification, if present,
+ // per cxx-abi-dev proposal on 2016-10-11.
+ if (T->hasInstantiationDependentExceptionSpec()) {
+ if (T->getExceptionSpecType() == EST_ComputedNoexcept) {
+ Out << "DO";
+ mangleExpression(T->getNoexceptExpr());
+ Out << "E";
+ } else {
+ assert(T->getExceptionSpecType() == EST_Dynamic);
+ Out << "Dw";
+ for (auto ExceptTy : T->exceptions())
+ mangleType(ExceptTy);
+ Out << "E";
+ }
+ } else if (T->isNothrow(getASTContext())) {
+ Out << "Do";
+ }
+
Out << 'F';
// FIXME: We don't have enough information in the AST to produce the 'Y'
@@ -3115,12 +3205,14 @@ void CXXNameMangler::mangleMemberExpr(const Expr *base,
NestedNameSpecifier *qualifier,
NamedDecl *firstQualifierLookup,
DeclarationName member,
+ const TemplateArgumentLoc *TemplateArgs,
+ unsigned NumTemplateArgs,
unsigned arity) {
// <expression> ::= dt <expression> <unresolved-name>
// ::= pt <expression> <unresolved-name>
if (base)
mangleMemberExprBase(base, isArrow);
- mangleUnresolvedName(qualifier, member, arity);
+ mangleUnresolvedName(qualifier, member, TemplateArgs, NumTemplateArgs, arity);
}
/// Look at the callee of the given call expression and determine if
@@ -3209,6 +3301,8 @@ recurse:
case Expr::AddrLabelExprClass:
case Expr::DesignatedInitUpdateExprClass:
case Expr::ImplicitValueInitExprClass:
+ case Expr::ArrayInitLoopExprClass:
+ case Expr::ArrayInitIndexExprClass:
case Expr::NoInitExprClass:
case Expr::ParenListExprClass:
case Expr::LambdaExprClass:
@@ -3418,7 +3512,9 @@ recurse:
const MemberExpr *ME = cast<MemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
ME->getQualifier(), nullptr,
- ME->getMemberDecl()->getDeclName(), Arity);
+ ME->getMemberDecl()->getDeclName(),
+ ME->getTemplateArgs(), ME->getNumTemplateArgs(),
+ Arity);
break;
}
@@ -3426,9 +3522,9 @@ recurse:
const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
ME->isArrow(), ME->getQualifier(), nullptr,
- ME->getMemberName(), Arity);
- if (ME->hasExplicitTemplateArgs())
- mangleTemplateArgs(ME->getTemplateArgs(), ME->getNumTemplateArgs());
+ ME->getMemberName(),
+ ME->getTemplateArgs(), ME->getNumTemplateArgs(),
+ Arity);
break;
}
@@ -3438,21 +3534,17 @@ recurse:
mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
ME->isArrow(), ME->getQualifier(),
ME->getFirstQualifierFoundInScope(),
- ME->getMember(), Arity);
- if (ME->hasExplicitTemplateArgs())
- mangleTemplateArgs(ME->getTemplateArgs(), ME->getNumTemplateArgs());
+ ME->getMember(),
+ ME->getTemplateArgs(), ME->getNumTemplateArgs(),
+ Arity);
break;
}
case Expr::UnresolvedLookupExprClass: {
const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
- mangleUnresolvedName(ULE->getQualifier(), ULE->getName(), Arity);
-
- // All the <unresolved-name> productions end in a
- // base-unresolved-name, where <template-args> are just tacked
- // onto the end.
- if (ULE->hasExplicitTemplateArgs())
- mangleTemplateArgs(ULE->getTemplateArgs(), ULE->getNumTemplateArgs());
+ mangleUnresolvedName(ULE->getQualifier(), ULE->getName(),
+ ULE->getTemplateArgs(), ULE->getNumTemplateArgs(),
+ Arity);
break;
}
@@ -3707,7 +3799,10 @@ recurse:
case Expr::CXXOperatorCallExprClass: {
const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
unsigned NumArgs = CE->getNumArgs();
- mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
+ // A CXXOperatorCallExpr for OO_Arrow models only semantics, not syntax
+ // (the enclosing MemberExpr covers the syntactic portion).
+ if (CE->getOperator() != OO_Arrow)
+ mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
// Mangle the arguments.
for (unsigned i = 0; i != NumArgs; ++i)
mangleExpression(CE->getArg(i));
@@ -3768,13 +3863,9 @@ recurse:
case Expr::DependentScopeDeclRefExprClass: {
const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
- mangleUnresolvedName(DRE->getQualifier(), DRE->getDeclName(), Arity);
-
- // All the <unresolved-name> productions end in a
- // base-unresolved-name, where <template-args> are just tacked
- // onto the end.
- if (DRE->hasExplicitTemplateArgs())
- mangleTemplateArgs(DRE->getTemplateArgs(), DRE->getNumTemplateArgs());
+ mangleUnresolvedName(DRE->getQualifier(), DRE->getDeclName(),
+ DRE->getTemplateArgs(), DRE->getNumTemplateArgs(),
+ Arity);
break;
}
@@ -4406,6 +4497,14 @@ void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
Substitutions[Ptr] = SeqID++;
}
+void CXXNameMangler::extendSubstitutions(CXXNameMangler* Other) {
+ assert(Other->SeqID >= SeqID && "Must be superset of substitutions!");
+ if (Other->SeqID > SeqID) {
+ Substitutions.swap(Other->Substitutions);
+ SeqID = Other->SeqID;
+ }
+}
+
CXXNameMangler::AbiTagList
CXXNameMangler::makeFunctionReturnTypeTags(const FunctionDecl *FD) {
// When derived abi tags are disabled there is no need to make any list.
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index ee241732e8ad..05dd886adcef 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -52,6 +52,7 @@ void MangleContext::anchor() { }
enum CCMangling {
CCM_Other,
CCM_Fast,
+ CCM_RegCall,
CCM_Vector,
CCM_Std
};
@@ -152,6 +153,8 @@ void MangleContext::mangleName(const NamedDecl *D, raw_ostream &Out) {
Out << '_';
else if (CC == CCM_Fast)
Out << '@';
+ else if (CC == CCM_RegCall)
+ Out << "__regcall3__";
if (!MCXX)
Out << D->getIdentifier()->getName();
diff --git a/lib/AST/MicrosoftCXXABI.cpp b/lib/AST/MicrosoftCXXABI.cpp
index 3ae04538d626..73324e40f3b1 100644
--- a/lib/AST/MicrosoftCXXABI.cpp
+++ b/lib/AST/MicrosoftCXXABI.cpp
@@ -67,8 +67,6 @@ public:
class MicrosoftCXXABI : public CXXABI {
ASTContext &Context;
llvm::SmallDenseMap<CXXRecordDecl *, CXXConstructorDecl *> RecordToCopyCtor;
- llvm::SmallDenseMap<std::pair<const CXXConstructorDecl *, unsigned>, Expr *>
- CtorToDefaultArgExpr;
llvm::SmallDenseMap<TagDecl *, DeclaratorDecl *>
UnnamedTagDeclToDeclaratorDecl;
@@ -92,16 +90,6 @@ public:
llvm_unreachable("unapplicable to the MS ABI");
}
- void addDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx, Expr *DAE) override {
- CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)] = DAE;
- }
-
- Expr *getDefaultArgExprForConstructor(const CXXConstructorDecl *CD,
- unsigned ParmIdx) override {
- return CtorToDefaultArgExpr[std::make_pair(CD, ParmIdx)];
- }
-
const CXXConstructorDecl *
getCopyConstructorForExceptionObject(CXXRecordDecl *RD) override {
return RecordToCopyCtor[RD];
@@ -143,8 +131,9 @@ public:
const_cast<TagDecl *>(TD->getCanonicalDecl()));
}
- MangleNumberingContext *createMangleNumberingContext() const override {
- return new MicrosoftNumberingContext();
+ std::unique_ptr<MangleNumberingContext>
+ createMangleNumberingContext() const override {
+ return llvm::make_unique<MicrosoftNumberingContext>();
}
};
}
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 351997e02a9d..911b8b471a05 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -66,6 +66,16 @@ struct msvc_hashing_ostream : public llvm::raw_svector_ostream {
}
};
+static const DeclContext *
+getLambdaDefaultArgumentDeclContext(const Decl *D) {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(D))
+ if (RD->isLambda())
+ if (const auto *Parm =
+ dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
+ return Parm->getDeclContext();
+ return nullptr;
+}
+
/// \brief Retrieve the declaration context that should be used when mangling
/// the given declaration.
static const DeclContext *getEffectiveDeclContext(const Decl *D) {
@@ -75,12 +85,8 @@ static const DeclContext *getEffectiveDeclContext(const Decl *D) {
// not the case: the lambda closure type ends up living in the context
// where the function itself resides, because the function declaration itself
// had not yet been created. Fix the context here.
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
- if (RD->isLambda())
- if (ParmVarDecl *ContextParam =
- dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
- return ContextParam->getDeclContext();
- }
+ if (const auto *LDADC = getLambdaDefaultArgumentDeclContext(D))
+ return LDADC;
// Perform the same check for block literals.
if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
@@ -112,14 +118,6 @@ static const FunctionDecl *getStructor(const NamedDecl *ND) {
return FD;
}
-static bool isLambda(const NamedDecl *ND) {
- const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
- if (!Record)
- return false;
-
- return Record->isLambda();
-}
-
/// MicrosoftMangleContextImpl - Overrides the default MangleContext for the
/// Microsoft Visual C++ ABI.
class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
@@ -200,9 +198,11 @@ public:
// Lambda closure types are already numbered, give out a phony number so
// that they demangle nicely.
- if (isLambda(ND)) {
- disc = 1;
- return true;
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(ND)) {
+ if (RD->isLambda()) {
+ disc = 1;
+ return true;
+ }
}
// Use the canonical number for externally visible decls.
@@ -394,7 +394,8 @@ bool MicrosoftMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (!getASTContext().getLangOpts().CPlusPlus)
return false;
- if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD && !isa<DecompositionDecl>(D)) {
// C variables are not mangled.
if (VD->isExternC())
return false;
@@ -780,6 +781,21 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
}
}
+ if (const DecompositionDecl *DD = dyn_cast<DecompositionDecl>(ND)) {
+ // FIXME: Invented mangling for decomposition declarations:
+ // [X,Y,Z]
+ // where X,Y,Z are the names of the bindings.
+ llvm::SmallString<128> Name("[");
+ for (auto *BD : DD->bindings()) {
+ if (Name.size() > 1)
+ Name += ',';
+ Name += BD->getDeclName().getAsIdentifierInfo()->getName();
+ }
+ Name += ']';
+ mangleSourceName(Name);
+ break;
+ }
+
if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
// We must have an anonymous union or struct declaration.
const CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl();
@@ -808,9 +824,24 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
if (Record->isLambda()) {
llvm::SmallString<10> Name("<lambda_");
+
+ Decl *LambdaContextDecl = Record->getLambdaContextDecl();
+ unsigned LambdaManglingNumber = Record->getLambdaManglingNumber();
unsigned LambdaId;
- if (Record->getLambdaManglingNumber())
- LambdaId = Record->getLambdaManglingNumber();
+ const ParmVarDecl *Parm =
+ dyn_cast_or_null<ParmVarDecl>(LambdaContextDecl);
+ const FunctionDecl *Func =
+ Parm ? dyn_cast<FunctionDecl>(Parm->getDeclContext()) : nullptr;
+
+ if (Func) {
+ unsigned DefaultArgNo =
+ Func->getNumParams() - Parm->getFunctionScopeIndex();
+ Name += llvm::utostr(DefaultArgNo);
+ Name += "_";
+ }
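+
+ // For instance (a sketch), a lambda in the default argument of a
+ // function's last parameter gets DefaultArgNo == 1, yielding a name of
+ // the form "<lambda_1_N>".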
+
+ if (LambdaManglingNumber)
+ LambdaId = LambdaManglingNumber;
else
LambdaId = Context.getLambdaId(Record);
@@ -818,25 +849,42 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
Name += ">";
mangleSourceName(Name);
+
+ // If the context of a closure type is an initializer for a class
+ // member (static or nonstatic), it is encoded in a qualified name.
+ if (LambdaManglingNumber && LambdaContextDecl) {
+ if ((isa<VarDecl>(LambdaContextDecl) ||
+ isa<FieldDecl>(LambdaContextDecl)) &&
+ LambdaContextDecl->getDeclContext()->isRecord()) {
+ mangleUnqualifiedName(cast<NamedDecl>(LambdaContextDecl));
+ }
+ }
break;
}
}
- llvm::SmallString<64> Name("<unnamed-type-");
+ llvm::SmallString<64> Name;
if (DeclaratorDecl *DD =
Context.getASTContext().getDeclaratorForUnnamedTagDecl(TD)) {
// Anonymous types without a name for linkage purposes have their
// declarator mangled in if they have one.
+ Name += "<unnamed-type-";
Name += DD->getName();
} else if (TypedefNameDecl *TND =
Context.getASTContext().getTypedefNameForUnnamedTagDecl(
TD)) {
// Anonymous types without a name for linkage purposes have their
// associate typedef mangled in if they have one.
+ Name += "<unnamed-type-";
Name += TND->getName();
+ } else if (auto *ED = dyn_cast<EnumDecl>(TD)) {
+ auto EnumeratorI = ED->enumerator_begin();
+ assert(EnumeratorI != ED->enumerator_end());
+ Name += "<unnamed-enum-";
+ Name += EnumeratorI->getName();
} else {
// Otherwise, number the types using a $S prefix.
- Name += "$S";
+ Name += "<unnamed-type-$S";
Name += llvm::utostr(Context.getAnonymousStructId(TD) + 1);
}
Name += ">";
@@ -921,7 +969,6 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
// for how this should be done.
Out << "__block_invoke" << Context.getBlockId(BD, false);
Out << '@';
- continue;
} else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
mangleObjCMethodName(Method);
} else if (isa<NamedDecl>(DC)) {
@@ -929,8 +976,15 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
mangle(FD, "?");
break;
- } else
+ } else {
mangleUnqualifiedName(ND);
+ // Lambdas in default arguments conceptually belong to the function the
+ // parameter corresponds to.
+ if (const auto *LDADC = getLambdaDefaultArgumentDeclContext(ND)) {
+ DC = LDADC;
+ continue;
+ }
+ }
}
DC = DC->getParent();
}
@@ -1073,6 +1127,8 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
case OO_Array_New: Out << "?_U"; break;
// <operator-name> ::= ?_V # delete[]
case OO_Array_Delete: Out << "?_V"; break;
+ // <operator-name> ::= ?__L # co_await
+ case OO_Coawait: Out << "?__L"; break;
case OO_Conditional: {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -1082,14 +1138,6 @@ void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO,
break;
}
- case OO_Coawait: {
- DiagnosticsEngine &Diags = Context.getDiags();
- unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
- "cannot mangle this operator co_await yet");
- Diags.Report(Loc, DiagID);
- break;
- }
-
case OO_None:
case NUM_OVERLOADED_OPERATORS:
llvm_unreachable("Not an overloaded operator");
@@ -1993,6 +2041,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
// ::= I # __fastcall
// ::= J # __export __fastcall
// ::= Q # __vectorcall
+ // ::= w # __regcall
// The 'export' calling conventions are from a bygone era
// (*cough*Win16*cough*) when functions were declared for export with
// that keyword. (It didn't actually export them, it just made them so
@@ -2010,6 +2059,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
case CC_X86StdCall: Out << 'G'; break;
case CC_X86FastCall: Out << 'I'; break;
case CC_X86VectorCall: Out << 'Q'; break;
+ case CC_X86RegCall: Out << 'w'; break;
}
}
void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T) {
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 82809d7ea7b5..514c7c9f5b33 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -34,8 +34,8 @@ NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
NestedNameSpecifier *NNS
= Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
if (!NNS) {
- NNS = new (Context, llvm::alignOf<NestedNameSpecifier>())
- NestedNameSpecifier(Mockup);
+ NNS =
+ new (Context, alignof(NestedNameSpecifier)) NestedNameSpecifier(Mockup);
Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
}
@@ -113,8 +113,7 @@ NestedNameSpecifier *
NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
if (!Context.GlobalNestedNameSpecifier)
Context.GlobalNestedNameSpecifier =
- new (Context, llvm::alignOf<NestedNameSpecifier>())
- NestedNameSpecifier();
+ new (Context, alignof(NestedNameSpecifier)) NestedNameSpecifier();
return Context.GlobalNestedNameSpecifier;
}
@@ -155,7 +154,7 @@ NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
/// \brief Retrieve the namespace stored in this nested name specifier.
NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
- if (Prefix.getInt() == StoredDecl)
+ if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier));
return nullptr;
@@ -163,7 +162,7 @@ NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
/// \brief Retrieve the namespace alias stored in this nested name specifier.
NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
- if (Prefix.getInt() == StoredDecl)
+ if (Prefix.getInt() == StoredDecl)
return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier));
return nullptr;
@@ -687,7 +686,7 @@ NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const {
// FIXME: After copying the source-location information, should we free
// our (temporary) buffer and adopt the ASTContext-allocated memory?
// Doing so would optimize repeated calls to getWithLocInContext().
- void *Mem = Context.Allocate(BufferSize, llvm::alignOf<void *>());
+ void *Mem = Context.Allocate(BufferSize, alignof(void *));
memcpy(Mem, Buffer, BufferSize);
return NestedNameSpecifierLoc(Representation, Mem);
}
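[editor's sketch] The NestedNameSpecifier hunks above belong to a tree-wide, mechanical migration from LLVM's llvm::alignOf<T>() helper to the C++11 alignof keyword; behavior is unchanged. A minimal self-contained sketch of the allocation pattern being rewritten, with a hypothetical Arena standing in for ASTContext (an assumption, not the real clang API surface):

  #include <cstddef>

  // Hypothetical arena allocator standing in for ASTContext::Allocate.
  struct Arena {
    void *Allocate(std::size_t Size, std::size_t Align);
  };
  void *operator new(std::size_t Size, Arena &A, std::size_t Align) {
    return A.Allocate(Size, Align);
  }

  struct Node { int Kind; };

  Node *make(Arena &A) {
    // Alignment is passed explicitly to the placement new; the commit only
    // changes its spelling from llvm::alignOf<Node>() to alignof(Node).
    return new (A, alignof(Node)) Node{0};
  }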
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
index d04ba727bb05..a28b9f3b6d64 100644
--- a/lib/AST/OpenMPClause.cpp
+++ b/lib/AST/OpenMPClause.cpp
@@ -732,38 +732,113 @@ OMPFromClause *OMPFromClause::CreateEmpty(const ASTContext &C, unsigned NumVars,
NumComponentLists, NumComponents);
}
-OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc,
- ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
- OMPUseDevicePtrClause *Clause =
- new (Mem) OMPUseDevicePtrClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
+void OMPUseDevicePtrClause::setPrivateCopies(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of private copies is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), varlist_end());
+}
+
+void OMPUseDevicePtrClause::setInits(ArrayRef<Expr *> VL) {
+ assert(VL.size() == varlist_size() &&
+ "Number of inits is not the same as the preallocated buffer");
+ std::copy(VL.begin(), VL.end(), getPrivateCopies().end());
+}
+
+OMPUseDevicePtrClause *OMPUseDevicePtrClause::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc, ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars,
+ ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists) {
+ unsigned NumVars = Vars.size();
+ unsigned NumUniqueDeclarations =
+ getUniqueDeclarationsTotalNumber(Declarations);
+ unsigned NumComponentLists = ComponentLists.size();
+ unsigned NumComponents = getComponentsTotalNumber(ComponentLists);
+
+ // We need to allocate:
+ // 3 x NumVars x Expr* - we have an original list expression for each clause
+ // list entry and an equal number of private copies and inits.
+ // NumUniqueDeclarations x ValueDecl* - unique base declarations associated
+ // with each component list.
+ // (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
+ // number of lists for each unique declaration and the size of each component
+ // list.
+ // NumComponents x MappableComponent - the total of all the components in all
+ // the lists.
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ 3 * NumVars, NumUniqueDeclarations,
+ NumUniqueDeclarations + NumComponentLists, NumComponents));
+
+ OMPUseDevicePtrClause *Clause = new (Mem) OMPUseDevicePtrClause(
+ StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations,
+ NumComponentLists, NumComponents);
+
+ Clause->setVarRefs(Vars);
+ Clause->setPrivateCopies(PrivateVars);
+ Clause->setInits(Inits);
+ Clause->setClauseInfo(Declarations, ComponentLists);
return Clause;
}
-OMPUseDevicePtrClause *OMPUseDevicePtrClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
- return new (Mem) OMPUseDevicePtrClause(N);
+OMPUseDevicePtrClause *OMPUseDevicePtrClause::CreateEmpty(
+ const ASTContext &C, unsigned NumVars, unsigned NumUniqueDeclarations,
+ unsigned NumComponentLists, unsigned NumComponents) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ 3 * NumVars, NumUniqueDeclarations,
+ NumUniqueDeclarations + NumComponentLists, NumComponents));
+ return new (Mem) OMPUseDevicePtrClause(NumVars, NumUniqueDeclarations,
+ NumComponentLists, NumComponents);
}
-OMPIsDevicePtrClause *OMPIsDevicePtrClause::Create(const ASTContext &C,
- SourceLocation StartLoc,
- SourceLocation LParenLoc,
- SourceLocation EndLoc,
- ArrayRef<Expr *> VL) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
- OMPIsDevicePtrClause *Clause =
- new (Mem) OMPIsDevicePtrClause(StartLoc, LParenLoc, EndLoc, VL.size());
- Clause->setVarRefs(VL);
+OMPIsDevicePtrClause *
+OMPIsDevicePtrClause::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists) {
+ unsigned NumVars = Vars.size();
+ unsigned NumUniqueDeclarations =
+ getUniqueDeclarationsTotalNumber(Declarations);
+ unsigned NumComponentLists = ComponentLists.size();
+ unsigned NumComponents = getComponentsTotalNumber(ComponentLists);
+
+ // We need to allocate:
+ // NumVars x Expr* - we have an original list expression for each clause list
+ // entry.
+ // NumUniqueDeclarations x ValueDecl* - unique base declarations associated
+ // with each component list.
+ // (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
+ // number of lists for each unique declaration and the size of each component
+ // list.
+ // NumComponents x MappableComponent - the total of all the components in all
+ // the lists.
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ NumVars, NumUniqueDeclarations,
+ NumUniqueDeclarations + NumComponentLists, NumComponents));
+
+ OMPIsDevicePtrClause *Clause = new (Mem) OMPIsDevicePtrClause(
+ StartLoc, LParenLoc, EndLoc, NumVars, NumUniqueDeclarations,
+ NumComponentLists, NumComponents);
+
+ Clause->setVarRefs(Vars);
+ Clause->setClauseInfo(Declarations, ComponentLists);
return Clause;
}
-OMPIsDevicePtrClause *OMPIsDevicePtrClause::CreateEmpty(const ASTContext &C,
- unsigned N) {
- void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
- return new (Mem) OMPIsDevicePtrClause(N);
+OMPIsDevicePtrClause *OMPIsDevicePtrClause::CreateEmpty(
+ const ASTContext &C, unsigned NumVars, unsigned NumUniqueDeclarations,
+ unsigned NumComponentLists, unsigned NumComponents) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ NumVars, NumUniqueDeclarations,
+ NumUniqueDeclarations + NumComponentLists, NumComponents));
+ return new (Mem) OMPIsDevicePtrClause(NumVars, NumUniqueDeclarations,
+ NumComponentLists, NumComponents);
}
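[editor's sketch] Both device-pointer clauses above switch from a flat Expr* array to the multi-array trailing storage that the map/to/from clauses already use. A hedged, self-contained sketch of the byte count that totalSizeToAlloc computes for use_device_ptr (inter-array padding, which llvm::TrailingObjects inserts only when alignment demands it, is ignored here; the stand-in types are illustrative):

  #include <cstddef>

  struct Expr; struct ValueDecl;
  struct MappableComponent { void *Base; };  // stand-in for the real type

  std::size_t useDevicePtrAllocSize(std::size_t ClauseSize, unsigned NumVars,
                                    unsigned NumUniqueDecls,
                                    unsigned NumComponentLists,
                                    unsigned NumComponents) {
    return ClauseSize
         + 3 * NumVars * sizeof(Expr *)          // originals, private copies, inits
         + NumUniqueDecls * sizeof(ValueDecl *)  // unique base declarations
         + (NumUniqueDecls + NumComponentLists) * sizeof(unsigned) // counts/sizes
         + NumComponents * sizeof(MappableComponent); // all components, flattened
  }

This layout is also why setInits above copies to getPrivateCopies().end(): the inits slice sits immediately after the private-copies slice within the same 3 * NumVars block of Expr* slots.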
diff --git a/lib/AST/RawCommentList.cpp b/lib/AST/RawCommentList.cpp
index 8317f76b8569..881a7d9c61be 100644
--- a/lib/AST/RawCommentList.cpp
+++ b/lib/AST/RawCommentList.cpp
@@ -175,8 +175,8 @@ StringRef RawComment::getRawTextSlow(const SourceManager &SourceMgr) const {
}
const char *RawComment::extractBriefText(const ASTContext &Context) const {
- // Make sure that RawText is valid.
- getRawText(Context.getSourceManager());
+ // Lazily initialize RawText using the accessor before using it.
+ (void)getRawText(Context.getSourceManager());
// Since we will be copying the resulting text, all allocations made during
// parsing are garbage after resulting string is formed. Thus we can use
@@ -202,8 +202,8 @@ const char *RawComment::extractBriefText(const ASTContext &Context) const {
comments::FullComment *RawComment::parse(const ASTContext &Context,
const Preprocessor *PP,
const Decl *D) const {
- // Make sure that RawText is valid.
- getRawText(Context.getSourceManager());
+ // Lazily initialize RawText using the accessor before using it.
+ (void)getRawText(Context.getSourceManager());
comments::Lexer L(Context.getAllocator(), Context.getDiagnostics(),
Context.getCommentCommandTraits(),
@@ -334,4 +334,3 @@ void RawCommentList::addDeserializedComments(ArrayRef<RawComment *> Deserialized
BeforeThanCompare<RawComment>(SourceMgr));
std::swap(Comments, MergedComments);
}
-
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index 75c076399511..697cdc3fb360 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -315,7 +315,7 @@ AttributedStmt *AttributedStmt::Create(const ASTContext &C, SourceLocation Loc,
Stmt *SubStmt) {
assert(!Attrs.empty() && "Attrs should not be empty");
void *Mem = C.Allocate(sizeof(AttributedStmt) + sizeof(Attr *) * Attrs.size(),
- llvm::alignOf<AttributedStmt>());
+ alignof(AttributedStmt));
return new (Mem) AttributedStmt(Loc, Attrs, SubStmt);
}
@@ -323,7 +323,7 @@ AttributedStmt *AttributedStmt::CreateEmpty(const ASTContext &C,
unsigned NumAttrs) {
assert(NumAttrs > 0 && "NumAttrs should be greater than zero");
void *Mem = C.Allocate(sizeof(AttributedStmt) + sizeof(Attr *) * NumAttrs,
- llvm::alignOf<AttributedStmt>());
+ alignof(AttributedStmt));
return new (Mem) AttributedStmt(EmptyShell(), NumAttrs);
}
@@ -533,15 +533,17 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
DiagOffs = CurPtr-StrStart-1;
return diag::err_asm_invalid_escape;
}
-
+ // Handle escaped char and continue looping over the asm string.
char EscapedChar = *CurPtr++;
- if (EscapedChar == '%') { // %% -> %
- // Escaped percentage sign.
- CurStringPiece += '%';
+ switch (EscapedChar) {
+ default:
+ break;
+ case '%': // %% -> %
+ case '{': // %{ -> {
+ case '}': // %} -> }
+ CurStringPiece += EscapedChar;
continue;
- }
-
- if (EscapedChar == '=') { // %= -> Generate an unique ID.
+ case '=': // %= -> Generate a unique ID.
CurStringPiece += "${:uid}";
continue;
}
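[editor's sketch] The rewritten loop now routes '%{' and '%}' through the same literal-passthrough path as '%%'. A hedged usage sketch (GCC-style inline asm; the instruction text is illustrative and not checked against any particular target):

  void escapes_demo(void) {
    // "%=" expands to an ID unique to each asm expansion (lowered to
    // "${:uid}"); "%{", "%}", and "%%" emit literal '{', '}', and '%'.
    asm volatile("uid%=: nop # %{ %} %%");
  }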
@@ -794,6 +796,10 @@ void IfStmt::setConditionVariable(const ASTContext &C, VarDecl *V) {
VarRange.getEnd());
}
+bool IfStmt::isObjCAvailabilityCheck() const {
+ return isa<ObjCAvailabilityCheckExpr>(SubExprs[COND]);
+}
+
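[editor's sketch] The new predicate lets later consumers recognize an if-statement whose condition came from an Objective-C @available check. A hedged illustration, assuming the usual clang AST headers:

  // In Objective-C source,
  //   if (@available(macOS 10.12, *)) { ... }
  // parses its condition to an ObjCAvailabilityCheckExpr, so:
  bool classify(const IfStmt *If) {
    return If->isObjCAvailabilityCheck();  // true only for @available conditions
  }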
ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP)
@@ -998,7 +1004,7 @@ CapturedStmt::Capture *CapturedStmt::getStoredCaptures() const {
unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1);
// Offset of the first Capture object.
- unsigned FirstCaptureOffset = llvm::alignTo(Size, llvm::alignOf<Capture>());
+ unsigned FirstCaptureOffset = llvm::alignTo(Size, alignof(Capture));
return reinterpret_cast<Capture *>(
reinterpret_cast<char *>(const_cast<CapturedStmt *>(this))
@@ -1055,7 +1061,7 @@ CapturedStmt *CapturedStmt::Create(const ASTContext &Context, Stmt *S,
unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (Captures.size() + 1);
if (!Captures.empty()) {
// Realign for the following Capture array.
- Size = llvm::alignTo(Size, llvm::alignOf<Capture>());
+ Size = llvm::alignTo(Size, alignof(Capture));
Size += sizeof(Capture) * Captures.size();
}
@@ -1068,7 +1074,7 @@ CapturedStmt *CapturedStmt::CreateDeserialized(const ASTContext &Context,
unsigned Size = sizeof(CapturedStmt) + sizeof(Stmt *) * (NumCaptures + 1);
if (NumCaptures > 0) {
// Realign for the following Capture array.
- Size = llvm::alignTo(Size, llvm::alignOf<Capture>());
+ Size = llvm::alignTo(Size, alignof(Capture));
Size += sizeof(Capture) * NumCaptures;
}
diff --git a/lib/AST/StmtCXX.cpp b/lib/AST/StmtCXX.cpp
index 4692db84b505..4a04fc211262 100644
--- a/lib/AST/StmtCXX.cpp
+++ b/lib/AST/StmtCXX.cpp
@@ -28,7 +28,7 @@ CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, SourceLocation tryLoc,
std::size_t Size = sizeof(CXXTryStmt);
Size += ((handlers.size() + 1) * sizeof(Stmt *));
- void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ void *Mem = C.Allocate(Size, alignof(CXXTryStmt));
return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers);
}
@@ -37,7 +37,7 @@ CXXTryStmt *CXXTryStmt::Create(const ASTContext &C, EmptyShell Empty,
std::size_t Size = sizeof(CXXTryStmt);
Size += ((numHandlers + 1) * sizeof(Stmt *));
- void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ void *Mem = C.Allocate(Size, alignof(CXXTryStmt));
return new (Mem) CXXTryStmt(Empty, numHandlers);
}
diff --git a/lib/AST/StmtObjC.cpp b/lib/AST/StmtObjC.cpp
index a77550c7605d..eea03f64c2fe 100644
--- a/lib/AST/StmtObjC.cpp
+++ b/lib/AST/StmtObjC.cpp
@@ -50,7 +50,7 @@ ObjCAtTryStmt *ObjCAtTryStmt::Create(const ASTContext &Context,
unsigned Size =
sizeof(ObjCAtTryStmt) +
(1 + NumCatchStmts + (atFinallyStmt != nullptr)) * sizeof(Stmt *);
- void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ void *Mem = Context.Allocate(Size, alignof(ObjCAtTryStmt));
return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
atFinallyStmt);
}
@@ -60,7 +60,7 @@ ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(const ASTContext &Context,
bool HasFinally) {
unsigned Size =
sizeof(ObjCAtTryStmt) + (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
- void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ void *Mem = Context.Allocate(Size, alignof(ObjCAtTryStmt));
return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
}
diff --git a/lib/AST/StmtOpenMP.cpp b/lib/AST/StmtOpenMP.cpp
index f1ddedb2b0f2..0a90740162b9 100644
--- a/lib/AST/StmtOpenMP.cpp
+++ b/lib/AST/StmtOpenMP.cpp
@@ -58,7 +58,7 @@ OMPParallelDirective *OMPParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
unsigned Size =
- llvm::alignTo(sizeof(OMPParallelDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPParallelDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPParallelDirective *Dir =
@@ -73,7 +73,7 @@ OMPParallelDirective *OMPParallelDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPParallelDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPParallelDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPParallelDirective(NumClauses);
@@ -84,8 +84,7 @@ OMPSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSimdDirective), llvm::alignOf<OMPClause *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPSimdDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
@@ -113,8 +112,7 @@ OMPSimdDirective *OMPSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSimdDirective), llvm::alignOf<OMPClause *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPSimdDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_simd));
@@ -126,8 +124,7 @@ OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPForDirective), llvm::alignOf<OMPClause *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPForDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
@@ -166,8 +163,7 @@ OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPForDirective), llvm::alignOf<OMPClause *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPForDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for));
@@ -180,7 +176,7 @@ OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size =
- llvm::alignTo(sizeof(OMPForSimdDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPForSimdDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
@@ -219,7 +215,7 @@ OMPForSimdDirective *OMPForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPForSimdDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPForSimdDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_for_simd));
@@ -230,7 +226,7 @@ OMPSectionsDirective *OMPSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
unsigned Size =
- llvm::alignTo(sizeof(OMPSectionsDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPSectionsDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPSectionsDirective *Dir =
@@ -245,7 +241,7 @@ OMPSectionsDirective *OMPSectionsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPSectionsDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPSectionsDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPSectionsDirective(NumClauses);
@@ -256,8 +252,7 @@ OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
SourceLocation EndLoc,
Stmt *AssociatedStmt,
bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSectionDirective), llvm::alignOf<Stmt *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPSectionDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPSectionDirective *Dir = new (Mem) OMPSectionDirective(StartLoc, EndLoc);
Dir->setAssociatedStmt(AssociatedStmt);
@@ -267,8 +262,7 @@ OMPSectionDirective *OMPSectionDirective::Create(const ASTContext &C,
OMPSectionDirective *OMPSectionDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPSectionDirective), llvm::alignOf<Stmt *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPSectionDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPSectionDirective();
}
@@ -279,7 +273,7 @@ OMPSingleDirective *OMPSingleDirective::Create(const ASTContext &C,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size =
- llvm::alignTo(sizeof(OMPSingleDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPSingleDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPSingleDirective *Dir =
@@ -293,7 +287,7 @@ OMPSingleDirective *OMPSingleDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPSingleDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPSingleDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPSingleDirective(NumClauses);
@@ -303,8 +297,7 @@ OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPMasterDirective), llvm::alignOf<Stmt *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPMasterDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPMasterDirective *Dir = new (Mem) OMPMasterDirective(StartLoc, EndLoc);
Dir->setAssociatedStmt(AssociatedStmt);
@@ -313,8 +306,7 @@ OMPMasterDirective *OMPMasterDirective::Create(const ASTContext &C,
OMPMasterDirective *OMPMasterDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPMasterDirective), llvm::alignOf<Stmt *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPMasterDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPMasterDirective();
}
@@ -324,7 +316,7 @@ OMPCriticalDirective *OMPCriticalDirective::Create(
SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
unsigned Size =
- llvm::alignTo(sizeof(OMPCriticalDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPCriticalDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPCriticalDirective *Dir =
@@ -338,7 +330,7 @@ OMPCriticalDirective *OMPCriticalDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPCriticalDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPCriticalDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPCriticalDirective(NumClauses);
@@ -348,8 +340,8 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelForDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPParallelForDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_parallel_for));
@@ -387,8 +379,8 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
OMPParallelForDirective *
OMPParallelForDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelForDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPParallelForDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_parallel_for));
@@ -399,8 +391,8 @@ OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPParallelForSimdDirective), alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
@@ -438,8 +430,8 @@ OMPParallelForSimdDirective *
OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPParallelForSimdDirective), alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_parallel_for_simd));
@@ -449,8 +441,8 @@ OMPParallelForSimdDirective::CreateEmpty(const ASTContext &C,
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelSectionsDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPParallelSectionsDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPParallelSectionsDirective *Dir =
@@ -464,8 +456,8 @@ OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
OMPParallelSectionsDirective *
OMPParallelSectionsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPParallelSectionsDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPParallelSectionsDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPParallelSectionsDirective(NumClauses);
@@ -475,8 +467,7 @@ OMPTaskDirective *
OMPTaskDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt, bool HasCancel) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskDirective), llvm::alignOf<OMPClause *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPTaskDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTaskDirective *Dir =
@@ -490,8 +481,7 @@ OMPTaskDirective::Create(const ASTContext &C, SourceLocation StartLoc,
OMPTaskDirective *OMPTaskDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskDirective), llvm::alignOf<OMPClause *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPTaskDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPTaskDirective(NumClauses);
@@ -544,8 +534,7 @@ OMPTaskgroupDirective *OMPTaskgroupDirective::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskgroupDirective), llvm::alignOf<Stmt *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPTaskgroupDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size + sizeof(Stmt *));
OMPTaskgroupDirective *Dir =
new (Mem) OMPTaskgroupDirective(StartLoc, EndLoc);
@@ -555,8 +544,7 @@ OMPTaskgroupDirective *OMPTaskgroupDirective::Create(const ASTContext &C,
OMPTaskgroupDirective *OMPTaskgroupDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
- unsigned Size =
- llvm::alignTo(sizeof(OMPTaskgroupDirective), llvm::alignOf<Stmt *>());
+ unsigned Size = llvm::alignTo(sizeof(OMPTaskgroupDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size + sizeof(Stmt *));
return new (Mem) OMPTaskgroupDirective();
}
@@ -564,8 +552,8 @@ OMPTaskgroupDirective *OMPTaskgroupDirective::CreateEmpty(const ASTContext &C,
OMPCancellationPointDirective *OMPCancellationPointDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion) {
- unsigned Size = llvm::alignTo(sizeof(OMPCancellationPointDirective),
- llvm::alignOf<Stmt *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPCancellationPointDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size);
OMPCancellationPointDirective *Dir =
new (Mem) OMPCancellationPointDirective(StartLoc, EndLoc);
@@ -575,8 +563,8 @@ OMPCancellationPointDirective *OMPCancellationPointDirective::Create(
OMPCancellationPointDirective *
OMPCancellationPointDirective::CreateEmpty(const ASTContext &C, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPCancellationPointDirective),
- llvm::alignOf<Stmt *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPCancellationPointDirective), alignof(Stmt *));
void *Mem = C.Allocate(Size);
return new (Mem) OMPCancellationPointDirective();
}
@@ -587,7 +575,7 @@ OMPCancelDirective::Create(const ASTContext &C, SourceLocation StartLoc,
OpenMPDirectiveKind CancelRegion) {
unsigned Size = llvm::alignTo(sizeof(OMPCancelDirective) +
sizeof(OMPClause *) * Clauses.size(),
- llvm::alignOf<Stmt *>());
+ alignof(Stmt *));
void *Mem = C.Allocate(Size);
OMPCancelDirective *Dir =
new (Mem) OMPCancelDirective(StartLoc, EndLoc, Clauses.size());
@@ -601,7 +589,7 @@ OMPCancelDirective *OMPCancelDirective::CreateEmpty(const ASTContext &C,
EmptyShell) {
unsigned Size = llvm::alignTo(sizeof(OMPCancelDirective) +
sizeof(OMPClause *) * NumClauses,
- llvm::alignOf<Stmt *>());
+ alignof(Stmt *));
void *Mem = C.Allocate(Size);
return new (Mem) OMPCancelDirective(NumClauses);
}
@@ -611,7 +599,7 @@ OMPFlushDirective *OMPFlushDirective::Create(const ASTContext &C,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
unsigned Size =
- llvm::alignTo(sizeof(OMPFlushDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPFlushDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size());
OMPFlushDirective *Dir =
new (Mem) OMPFlushDirective(StartLoc, EndLoc, Clauses.size());
@@ -623,7 +611,7 @@ OMPFlushDirective *OMPFlushDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPFlushDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPFlushDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses);
return new (Mem) OMPFlushDirective(NumClauses);
}
@@ -634,7 +622,7 @@ OMPOrderedDirective *OMPOrderedDirective::Create(const ASTContext &C,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size =
- llvm::alignTo(sizeof(OMPOrderedDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPOrderedDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * Clauses.size());
OMPOrderedDirective *Dir =
@@ -648,7 +636,7 @@ OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPOrderedDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPOrderedDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(Stmt *) + sizeof(OMPClause *) * NumClauses);
return new (Mem) OMPOrderedDirective(NumClauses);
@@ -659,7 +647,7 @@ OMPAtomicDirective *OMPAtomicDirective::Create(
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
unsigned Size =
- llvm::alignTo(sizeof(OMPAtomicDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPAtomicDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
5 * sizeof(Stmt *));
OMPAtomicDirective *Dir =
@@ -679,7 +667,7 @@ OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPAtomicDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPAtomicDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + 5 * sizeof(Stmt *));
return new (Mem) OMPAtomicDirective(NumClauses);
@@ -691,7 +679,7 @@ OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size =
- llvm::alignTo(sizeof(OMPTargetDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPTargetDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTargetDirective *Dir =
@@ -705,7 +693,7 @@ OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPTargetDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPTargetDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPTargetDirective(NumClauses);
@@ -714,8 +702,8 @@ OMPTargetDirective *OMPTargetDirective::CreateEmpty(const ASTContext &C,
OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTargetParallelDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTargetParallelDirective *Dir =
@@ -728,8 +716,8 @@ OMPTargetParallelDirective *OMPTargetParallelDirective::Create(
OMPTargetParallelDirective *
OMPTargetParallelDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTargetParallelDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPTargetParallelDirective(NumClauses);
@@ -740,7 +728,7 @@ OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs, bool HasCancel) {
unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_target_parallel_for));
@@ -780,7 +768,7 @@ OMPTargetParallelForDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_target_parallel_for));
@@ -790,9 +778,9 @@ OMPTargetParallelForDirective::CreateEmpty(const ASTContext &C,
OMPTargetDataDirective *OMPTargetDataDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
- void *Mem = C.Allocate(llvm::alignTo(sizeof(OMPTargetDataDirective),
- llvm::alignOf<OMPClause *>()) +
- sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ void *Mem = C.Allocate(
+ llvm::alignTo(sizeof(OMPTargetDataDirective), alignof(OMPClause *)) +
+ sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTargetDataDirective *Dir =
new (Mem) OMPTargetDataDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
@@ -803,18 +791,18 @@ OMPTargetDataDirective *OMPTargetDataDirective::Create(
OMPTargetDataDirective *OMPTargetDataDirective::CreateEmpty(const ASTContext &C,
unsigned N,
EmptyShell) {
- void *Mem = C.Allocate(llvm::alignTo(sizeof(OMPTargetDataDirective),
- llvm::alignOf<OMPClause *>()) +
- sizeof(OMPClause *) * N + sizeof(Stmt *));
+ void *Mem = C.Allocate(
+ llvm::alignTo(sizeof(OMPTargetDataDirective), alignof(OMPClause *)) +
+ sizeof(OMPClause *) * N + sizeof(Stmt *));
return new (Mem) OMPTargetDataDirective(N);
}
OMPTargetEnterDataDirective *OMPTargetEnterDataDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
- void *Mem = C.Allocate(llvm::alignTo(sizeof(OMPTargetEnterDataDirective),
- llvm::alignOf<OMPClause *>()) +
- sizeof(OMPClause *) * Clauses.size());
+ void *Mem = C.Allocate(
+ llvm::alignTo(sizeof(OMPTargetEnterDataDirective), alignof(OMPClause *)) +
+ sizeof(OMPClause *) * Clauses.size());
OMPTargetEnterDataDirective *Dir =
new (Mem) OMPTargetEnterDataDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
@@ -824,9 +812,9 @@ OMPTargetEnterDataDirective *OMPTargetEnterDataDirective::Create(
OMPTargetEnterDataDirective *
OMPTargetEnterDataDirective::CreateEmpty(const ASTContext &C, unsigned N,
EmptyShell) {
- void *Mem = C.Allocate(llvm::alignTo(sizeof(OMPTargetEnterDataDirective),
- llvm::alignOf<OMPClause *>()) +
- sizeof(OMPClause *) * N);
+ void *Mem = C.Allocate(
+ llvm::alignTo(sizeof(OMPTargetEnterDataDirective), alignof(OMPClause *)) +
+ sizeof(OMPClause *) * N);
return new (Mem) OMPTargetEnterDataDirective(N);
}
@@ -834,9 +822,9 @@ OMPTargetExitDataDirective *
OMPTargetExitDataDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
- void *Mem = C.Allocate(llvm::alignTo(sizeof(OMPTargetExitDataDirective),
- llvm::alignOf<OMPClause *>()) +
- sizeof(OMPClause *) * Clauses.size());
+ void *Mem = C.Allocate(
+ llvm::alignTo(sizeof(OMPTargetExitDataDirective), alignof(OMPClause *)) +
+ sizeof(OMPClause *) * Clauses.size());
OMPTargetExitDataDirective *Dir =
new (Mem) OMPTargetExitDataDirective(StartLoc, EndLoc, Clauses.size());
Dir->setClauses(Clauses);
@@ -846,9 +834,9 @@ OMPTargetExitDataDirective::Create(const ASTContext &C, SourceLocation StartLoc,
OMPTargetExitDataDirective *
OMPTargetExitDataDirective::CreateEmpty(const ASTContext &C, unsigned N,
EmptyShell) {
- void *Mem = C.Allocate(llvm::alignTo(sizeof(OMPTargetExitDataDirective),
- llvm::alignOf<OMPClause *>()) +
- sizeof(OMPClause *) * N);
+ void *Mem = C.Allocate(
+ llvm::alignTo(sizeof(OMPTargetExitDataDirective), alignof(OMPClause *)) +
+ sizeof(OMPClause *) * N);
return new (Mem) OMPTargetExitDataDirective(N);
}
@@ -858,7 +846,7 @@ OMPTeamsDirective *OMPTeamsDirective::Create(const ASTContext &C,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt) {
unsigned Size =
- llvm::alignTo(sizeof(OMPTeamsDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPTeamsDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
OMPTeamsDirective *Dir =
@@ -872,7 +860,7 @@ OMPTeamsDirective *OMPTeamsDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPTeamsDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPTeamsDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
return new (Mem) OMPTeamsDirective(NumClauses);
@@ -883,7 +871,7 @@ OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size =
- llvm::alignTo(sizeof(OMPTaskLoopDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPTaskLoopDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
@@ -922,7 +910,7 @@ OMPTaskLoopDirective *OMPTaskLoopDirective::CreateEmpty(const ASTContext &C,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size =
- llvm::alignTo(sizeof(OMPTaskLoopDirective), llvm::alignOf<OMPClause *>());
+ llvm::alignTo(sizeof(OMPTaskLoopDirective), alignof(OMPClause *));
void *Mem =
C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_taskloop));
@@ -933,8 +921,8 @@ OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPTaskLoopSimdDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTaskLoopSimdDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
@@ -971,8 +959,8 @@ OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
OMPTaskLoopSimdDirective *
OMPTaskLoopSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTaskLoopSimdDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTaskLoopSimdDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_taskloop_simd));
@@ -983,8 +971,8 @@ OMPDistributeDirective *OMPDistributeDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDistributeDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_distribute));
@@ -1021,8 +1009,8 @@ OMPDistributeDirective *OMPDistributeDirective::Create(
OMPDistributeDirective *
OMPDistributeDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDistributeDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_distribute));
@@ -1033,8 +1021,8 @@ OMPTargetUpdateDirective *
OMPTargetUpdateDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetUpdateDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTargetUpdateDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size());
OMPTargetUpdateDirective *Dir =
new (Mem) OMPTargetUpdateDirective(StartLoc, EndLoc, Clauses.size());
@@ -1045,8 +1033,8 @@ OMPTargetUpdateDirective::Create(const ASTContext &C, SourceLocation StartLoc,
OMPTargetUpdateDirective *
OMPTargetUpdateDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPTargetUpdateDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTargetUpdateDirective), alignof(OMPClause *));
void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses);
return new (Mem) OMPTargetUpdateDirective(NumClauses);
}
@@ -1056,7 +1044,7 @@ OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
@@ -1098,7 +1086,7 @@ OMPDistributeParallelForDirective::CreateEmpty(const ASTContext &C,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
@@ -1112,7 +1100,7 @@ OMPDistributeParallelForSimdDirective::Create(
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
@@ -1154,7 +1142,7 @@ OMPDistributeParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size = llvm::alignTo(sizeof(OMPDistributeParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
@@ -1167,8 +1155,8 @@ OMPDistributeSimdDirective *OMPDistributeSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeSimdDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDistributeSimdDirective), alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
@@ -1207,8 +1195,8 @@ OMPDistributeSimdDirective *
OMPDistributeSimdDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
unsigned CollapsedNum, EmptyShell) {
- unsigned Size = llvm::alignTo(sizeof(OMPDistributeSimdDirective),
- llvm::alignOf<OMPClause *>());
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPDistributeSimdDirective), alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
@@ -1221,7 +1209,7 @@ OMPTargetParallelForSimdDirective *OMPTargetParallelForSimdDirective::Create(
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
const HelperExprs &Exprs) {
unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * Clauses.size() +
sizeof(Stmt *) *
@@ -1263,10 +1251,411 @@ OMPTargetParallelForSimdDirective::CreateEmpty(const ASTContext &C,
unsigned CollapsedNum,
EmptyShell) {
unsigned Size = llvm::alignTo(sizeof(OMPTargetParallelForSimdDirective),
- llvm::alignOf<OMPClause *>());
+ alignof(OMPClause *));
void *Mem = C.Allocate(
Size + sizeof(OMPClause *) * NumClauses +
sizeof(Stmt *) *
numLoopChildren(CollapsedNum, OMPD_target_parallel_for_simd));
return new (Mem) OMPTargetParallelForSimdDirective(CollapsedNum, NumClauses);
}
+
+OMPTargetSimdDirective *
+OMPTargetSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, unsigned CollapsedNum,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, const HelperExprs &Exprs) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTargetSimdDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_target_simd));
+ OMPTargetSimdDirective *Dir = new (Mem)
+ OMPTargetSimdDirective(StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTargetSimdDirective *
+OMPTargetSimdDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTargetSimdDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_target_simd));
+ return new (Mem) OMPTargetSimdDirective(CollapsedNum, NumClauses);
+}
+
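[editor's sketch] OMPTargetSimdDirective above is the first of several new combined directives added below (teams distribute, teams distribute simd, teams distribute parallel for [simd], target teams, and so on); each repeats the same Create/CreateEmpty shape. A hedged sketch of the size computation they all share, with llvm::alignTo rounding the directive header up to pointer alignment before the trailing arrays:

  #include <cstddef>

  struct Stmt; struct OMPClause;

  // NumLoopChildren corresponds to numLoopChildren(CollapsedNum, Kind): the
  // associated statement plus one slot per loop helper expression.
  std::size_t directiveAllocSize(std::size_t DirSize, unsigned NumClauses,
                                 unsigned NumLoopChildren) {
    const std::size_t Align = alignof(OMPClause *);
    std::size_t Size = (DirSize + Align - 1) / Align * Align; // llvm::alignTo
    return Size + NumClauses * sizeof(OMPClause *)   // clause array
                + NumLoopChildren * sizeof(Stmt *);  // stmt + helper exprs
  }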
+OMPTeamsDistributeDirective *OMPTeamsDistributeDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTeamsDistributeDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_teams_distribute));
+ OMPTeamsDistributeDirective *Dir = new (Mem) OMPTeamsDistributeDirective(
+ StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTeamsDistributeDirective *
+OMPTeamsDistributeDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ unsigned Size =
+ llvm::alignTo(sizeof(OMPTeamsDistributeDirective), alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) * numLoopChildren(CollapsedNum, OMPD_teams_distribute));
+ return new (Mem) OMPTeamsDistributeDirective(CollapsedNum, NumClauses);
+}
+
+OMPTeamsDistributeSimdDirective *OMPTeamsDistributeSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ unsigned Size = llvm::alignTo(sizeof(OMPTeamsDistributeSimdDirective),
+ alignof(OMPClause *));
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_simd));
+ OMPTeamsDistributeSimdDirective *Dir =
+ new (Mem) OMPTeamsDistributeSimdDirective(StartLoc, EndLoc, CollapsedNum,
+ Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTeamsDistributeSimdDirective *OMPTeamsDistributeSimdDirective::CreateEmpty(
+ const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+ EmptyShell) {
+ unsigned Size = llvm::alignTo(sizeof(OMPTeamsDistributeSimdDirective),
+ alignof(OMPClause *));
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_simd));
+ return new (Mem) OMPTeamsDistributeSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPTeamsDistributeParallelForSimdDirective *
+OMPTeamsDistributeParallelForSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForSimdDirective),
+ alignof(OMPClause *));
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum,
+ OMPD_teams_distribute_parallel_for_simd));
+ OMPTeamsDistributeParallelForSimdDirective *Dir = new (Mem)
+ OMPTeamsDistributeParallelForSimdDirective(StartLoc, EndLoc, CollapsedNum,
+ Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTeamsDistributeParallelForSimdDirective *
+OMPTeamsDistributeParallelForSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForSimdDirective),
+ alignof(OMPClause *));
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum,
+ OMPD_teams_distribute_parallel_for_simd));
+ return new (Mem)
+ OMPTeamsDistributeParallelForSimdDirective(CollapsedNum, NumClauses);
+}
+
+OMPTeamsDistributeParallelForDirective *
+OMPTeamsDistributeParallelForDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for));
+ OMPTeamsDistributeParallelForDirective *Dir = new (Mem)
+ OMPTeamsDistributeParallelForDirective(StartLoc, EndLoc, CollapsedNum,
+ Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTeamsDistributeParallelForDirective *
+OMPTeamsDistributeParallelForDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ auto Size = llvm::alignTo(sizeof(OMPTeamsDistributeParallelForDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_teams_distribute_parallel_for));
+ return new (Mem)
+ OMPTeamsDistributeParallelForDirective(CollapsedNum, NumClauses);
+}
+
+OMPTargetTeamsDirective *OMPTargetTeamsDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt) {
+ auto Size =
+ llvm::alignTo(sizeof(OMPTargetTeamsDirective), alignof(OMPClause *));
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * Clauses.size() + sizeof(Stmt *));
+ OMPTargetTeamsDirective *Dir =
+ new (Mem) OMPTargetTeamsDirective(StartLoc, EndLoc, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ return Dir;
+}
+
+OMPTargetTeamsDirective *
+OMPTargetTeamsDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ EmptyShell) {
+ auto Size =
+ llvm::alignTo(sizeof(OMPTargetTeamsDirective), alignof(OMPClause *));
+ void *Mem =
+ C.Allocate(Size + sizeof(OMPClause *) * NumClauses + sizeof(Stmt *));
+ return new (Mem) OMPTargetTeamsDirective(NumClauses);
+}
+
+OMPTargetTeamsDistributeDirective *OMPTargetTeamsDistributeDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto Size = llvm::alignTo(sizeof(OMPTargetTeamsDistributeDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute));
+ OMPTargetTeamsDistributeDirective *Dir =
+ new (Mem) OMPTargetTeamsDistributeDirective(StartLoc, EndLoc, CollapsedNum,
+ Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTargetTeamsDistributeDirective *
+OMPTargetTeamsDistributeDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ auto Size = llvm::alignTo(sizeof(OMPTargetTeamsDistributeDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum, OMPD_target_teams_distribute));
+ return new (Mem) OMPTargetTeamsDistributeDirective(CollapsedNum, NumClauses);
+}
+
+OMPTargetTeamsDistributeParallelForDirective *
+OMPTargetTeamsDistributeParallelForDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto Size =
+ llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * Clauses.size() +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum,
+ OMPD_target_teams_distribute_parallel_for));
+ OMPTargetTeamsDistributeParallelForDirective *Dir =
+ new (Mem) OMPTargetTeamsDistributeParallelForDirective(
+ StartLoc, EndLoc, CollapsedNum, Clauses.size());
+ Dir->setClauses(Clauses);
+ Dir->setAssociatedStmt(AssociatedStmt);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
+ Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTargetTeamsDistributeParallelForDirective *
+OMPTargetTeamsDistributeParallelForDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ auto Size =
+ llvm::alignTo(sizeof(OMPTargetTeamsDistributeParallelForDirective),
+ alignof(OMPClause *));
+ void *Mem = C.Allocate(
+ Size + sizeof(OMPClause *) * NumClauses +
+ sizeof(Stmt *) *
+ numLoopChildren(CollapsedNum,
+ OMPD_target_teams_distribute_parallel_for));
+ return new (Mem)
+ OMPTargetTeamsDistributeParallelForDirective(CollapsedNum, NumClauses);
+}
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 8797a13335c4..a8f493dca07d 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1198,6 +1198,52 @@ void StmtPrinter::VisitOMPTargetParallelForSimdDirective(
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPTargetSimdDirective(OMPTargetSimdDirective *Node) {
+ Indent() << "#pragma omp target simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTeamsDistributeDirective(
+ OMPTeamsDistributeDirective *Node) {
+ Indent() << "#pragma omp teams distribute ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTeamsDistributeSimdDirective(
+ OMPTeamsDistributeSimdDirective *Node) {
+ Indent() << "#pragma omp teams distribute simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTeamsDistributeParallelForSimdDirective(
+ OMPTeamsDistributeParallelForSimdDirective *Node) {
+ Indent() << "#pragma omp teams distribute parallel for simd ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTeamsDistributeParallelForDirective(
+ OMPTeamsDistributeParallelForDirective *Node) {
+ Indent() << "#pragma omp teams distribute parallel for ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetTeamsDirective(OMPTargetTeamsDirective *Node) {
+ Indent() << "#pragma omp target teams ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetTeamsDistributeDirective(
+ OMPTargetTeamsDistributeDirective *Node) {
+ Indent() << "#pragma omp target teams distribute ";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetTeamsDistributeParallelForDirective(
+ OMPTargetTeamsDistributeParallelForDirective *Node) {
+ Indent() << "#pragma omp target teams distribute parallel for ";
+ PrintOMPExecutableDirective(Node);
+}
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -1676,6 +1722,18 @@ void StmtPrinter::VisitInitListExpr(InitListExpr* Node) {
OS << "}";
}
+void StmtPrinter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *Node) {
+ // There's no way to express this expression in any of our supported
+ // languages, so just emit something terse and (hopefully) clear.
+ OS << "{";
+ PrintExpr(Node->getSubExpr());
+ OS << "}";
+}
+
+void StmtPrinter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *Node) {
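+ // Like ArrayInitLoopExpr above, the implicit array index has no source
+ // spelling; print a placeholder.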
+ OS << "*";
+}
+
void StmtPrinter::VisitParenListExpr(ParenListExpr* Node) {
OS << "(";
for (unsigned i = 0, e = Node->getNumExprs(); i != e; ++i) {
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index 0a39413853a0..dd59a9b96c98 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -93,10 +93,6 @@ void StmtProfiler::VisitCompoundStmt(const CompoundStmt *S) {
VisitStmt(S);
}
-void StmtProfiler::VisitSwitchCase(const SwitchCase *S) {
- VisitStmt(S);
-}
-
void StmtProfiler::VisitCaseStmt(const CaseStmt *S) {
VisitStmt(S);
}
@@ -727,6 +723,46 @@ void StmtProfiler::VisitOMPTargetParallelForSimdDirective(
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPTargetSimdDirective(
+ const OMPTargetSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTeamsDistributeDirective(
+ const OMPTeamsDistributeDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTeamsDistributeSimdDirective(
+ const OMPTeamsDistributeSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTeamsDistributeParallelForSimdDirective(
+ const OMPTeamsDistributeParallelForSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTeamsDistributeParallelForDirective(
+ const OMPTeamsDistributeParallelForDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetTeamsDirective(
+ const OMPTargetTeamsDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetTeamsDistributeDirective(
+ const OMPTargetTeamsDistributeDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetTeamsDistributeParallelForDirective(
+ const OMPTargetTeamsDistributeParallelForDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
@@ -951,6 +987,14 @@ void StmtProfiler::VisitDesignatedInitUpdateExpr(
"initializer");
}
+void StmtProfiler::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitArrayInitIndexExpr(const ArrayInitIndexExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitNoInitExpr(const NoInitExpr *S) {
llvm_unreachable("Unexpected NoInitExpr in syntactic form of initializer");
}
@@ -1184,6 +1228,12 @@ void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
if (S->isTypeDependent()) {
// Type-dependent operator calls are profiled like their underlying
// syntactic operator.
+ //
+ // An operator call to operator-> is always implicit, so just skip it. The
+ // enclosing MemberExpr will profile the actual member access.
+ if (S->getOperator() == OO_Arrow)
+ return Visit(S->getArg(0));
+
UnaryOperatorKind UnaryOp = UO_Extension;
BinaryOperatorKind BinaryOp = BO_Comma;
Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index b75ede862f7a..099f939c7a75 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -243,6 +243,31 @@ Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
return None;
}
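+/// Return the type of a non-type template argument: the integral type, the
+/// expression's type, the declaration parameter's type, or the nullptr type.
+/// Kinds that do not denote a value yield a null QualType.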
+QualType TemplateArgument::getNonTypeTemplateArgumentType() const {
+ switch (getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Type:
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Pack:
+ return QualType();
+
+ case TemplateArgument::Integral:
+ return getIntegralType();
+
+ case TemplateArgument::Expression:
+ return getAsExpr()->getType();
+
+ case TemplateArgument::Declaration:
+ return getParamTypeForDecl();
+
+ case TemplateArgument::NullPtr:
+ return getNullPtrType();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Context) const {
ID.AddInteger(getKind());
@@ -530,7 +555,7 @@ const ASTTemplateArgumentListInfo *
ASTTemplateArgumentListInfo::Create(ASTContext &C,
const TemplateArgumentListInfo &List) {
std::size_t size = totalSizeToAlloc<TemplateArgumentLoc>(List.size());
- void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
+ void *Mem = C.Allocate(size, alignof(ASTTemplateArgumentListInfo));
return new (Mem) ASTTemplateArgumentListInfo(List);
}
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 99b024701aa3..0d0cd2e305be 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/Type.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
@@ -19,13 +20,11 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/PrettyPrinter.h"
-#include "clang/AST/Type.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
@@ -532,6 +531,18 @@ bool Type::isObjCInertUnsafeUnretainedType() const {
}
}
+ObjCTypeParamType::ObjCTypeParamType(const ObjCTypeParamDecl *D,
+ QualType can,
+ ArrayRef<ObjCProtocolDecl *> protocols)
+ : Type(ObjCTypeParam, can, can->isDependentType(),
+ can->isInstantiationDependentType(),
+ can->isVariablyModifiedType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ OTPDecl(const_cast<ObjCTypeParamDecl *>(D)) {
+ initialize(protocols);
+}
+
ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
ArrayRef<QualType> typeArgs,
ArrayRef<ObjCProtocolDecl *> protocols,
@@ -547,15 +558,9 @@ ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
ObjCObjectTypeBits.NumTypeArgs = typeArgs.size();
assert(getTypeArgsAsWritten().size() == typeArgs.size() &&
"bitfield overflow in type argument count");
- ObjCObjectTypeBits.NumProtocols = protocols.size();
- assert(getNumProtocols() == protocols.size() &&
- "bitfield overflow in protocol count");
if (!typeArgs.empty())
memcpy(getTypeArgStorage(), typeArgs.data(),
typeArgs.size() * sizeof(QualType));
- if (!protocols.empty())
- memcpy(getProtocolStorage(), protocols.data(),
- protocols.size() * sizeof(ObjCProtocolDecl*));
for (auto typeArg : typeArgs) {
if (typeArg->isDependentType())
@@ -566,6 +571,9 @@ ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
if (typeArg->containsUnexpandedParameterPack())
setContainsUnexpandedParameterPack();
}
+ // Initialize the protocol qualifiers. The protocol storage is known only
+ // after the number of type arguments has been set.
+ initialize(protocols);
}
bool ObjCObjectType::isSpecialized() const {
@@ -883,6 +891,7 @@ public:
}
TRIVIAL_TYPE_CLASS(Typedef)
+ TRIVIAL_TYPE_CLASS(ObjCTypeParam)
QualType VisitAdjustedType(const AdjustedType *T) {
QualType originalType = recurse(T->getOriginalType());
@@ -1048,7 +1057,7 @@ QualType simpleTransform(ASTContext &ctx, QualType type, F &&f) {
SplitQualType splitType = type.split();
// Visit the type itself.
- SimpleTransformVisitor<F> visitor(ctx, std::move(f));
+ SimpleTransformVisitor<F> visitor(ctx, std::forward<F>(f));
QualType result = visitor.Visit(splitType.Ty);
if (result.isNull())
return result;
@@ -1072,13 +1081,24 @@ QualType QualType::substObjCTypeArgs(
// Replace an Objective-C type parameter reference with the corresponding
// type argument.
- if (const auto *typedefTy = dyn_cast<TypedefType>(splitType.Ty)) {
- if (auto *typeParam = dyn_cast<ObjCTypeParamDecl>(typedefTy->getDecl())) {
+ if (const auto *OTPTy = dyn_cast<ObjCTypeParamType>(splitType.Ty)) {
+ if (auto *typeParam = dyn_cast<ObjCTypeParamDecl>(OTPTy->getDecl())) {
// If we have type arguments, use them.
if (!typeArgs.empty()) {
- // FIXME: Introduce SubstObjCTypeParamType ?
QualType argType = typeArgs[typeParam->getIndex()];
- return ctx.getQualifiedType(argType, splitType.Quals);
+ if (OTPTy->qual_empty())
+ return ctx.getQualifiedType(argType, splitType.Quals);
+
+ // Apply the protocol qualifiers from the type parameter, if any.
+ bool hasError;
+ SmallVector<ObjCProtocolDecl *, 8> protocolsVec;
+ protocolsVec.append(OTPTy->qual_begin(), OTPTy->qual_end());
+ ArrayRef<ObjCProtocolDecl *> protocolsToApply = protocolsVec;
+ QualType resultTy = ctx.applyObjCProtocolQualifiers(
+ argType, protocolsToApply, hasError, /*allowOnPointerType=*/true);
+
+ return ctx.getQualifiedType(resultTy, splitType.Quals);
}
switch (context) {
@@ -2317,6 +2337,15 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
return false;
}
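+// Whether this names the C++17 std::align_val_t enumeration used by
+// aligned allocation functions.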
+bool Type::isAlignValT() const {
+ if (auto *ET = getAs<EnumType>()) {
+ auto *II = ET->getDecl()->getIdentifier();
+ if (II && II->isStr("align_val_t") && ET->getDecl()->isInStdNamespace())
+ return true;
+ }
+ return false;
+}
+
bool Type::isPromotableIntegerType() const {
if (const BuiltinType *BT = getAs<BuiltinType>())
switch (BT->getKind()) {
@@ -2638,6 +2667,7 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_X86VectorCall: return "vectorcall";
case CC_X86_64Win64: return "ms_abi";
case CC_X86_64SysV: return "sysv_abi";
+ case CC_X86RegCall : return "regcall";
case CC_AAPCS: return "aapcs";
case CC_AAPCS_VFP: return "aapcs-vfp";
case CC_IntelOclBicc: return "intel_ocl_bicc";
@@ -2688,8 +2718,9 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
QualType *exnSlot = argSlot + NumParams;
unsigned I = 0;
for (QualType ExceptionType : epi.ExceptionSpec.Exceptions) {
- // Note that a dependent exception specification does *not* make
- // a type dependent; it's not even part of the C++ type system.
+ // Note that, before C++17, a dependent exception specification does
+ // *not* make a type dependent; it's not even part of the C++ type
+ // system.
if (ExceptionType->isInstantiationDependentType())
setInstantiationDependent();
@@ -2728,6 +2759,19 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
slot[0] = epi.ExceptionSpec.SourceDecl;
}
+ // If this is a canonical type, and its exception specification is dependent,
+ // then it's a dependent type. This only happens in C++17 onwards.
+ if (isCanonicalUnqualified()) {
+ if (getExceptionSpecType() == EST_Dynamic ||
+ getExceptionSpecType() == EST_ComputedNoexcept) {
+ assert(hasDependentExceptionSpec() && "type should not be canonical");
+ setDependent();
+ }
+ } else if (getCanonicalTypeInternal()->isDependentType()) {
+ // Ask our canonical type whether our exception specification was dependent.
+ setDependent();
+ }
+
if (epi.ExtParameterInfos) {
ExtParameterInfo *extParamInfos =
const_cast<ExtParameterInfo *>(getExtParameterInfosBuffer());
@@ -2748,6 +2792,15 @@ bool FunctionProtoType::hasDependentExceptionSpec() const {
return false;
}
+bool FunctionProtoType::hasInstantiationDependentExceptionSpec() const {
+ if (Expr *NE = getNoexceptExpr())
+ return NE->isInstantiationDependent();
+ for (QualType ET : exceptions())
+ if (ET->isInstantiationDependentType())
+ return true;
+ return false;
+}
+
FunctionProtoType::NoexceptResult
FunctionProtoType::getNoexceptSpec(const ASTContext &ctx) const {
ExceptionSpecificationType est = getExceptionSpecType();
@@ -2772,29 +2825,28 @@ FunctionProtoType::getNoexceptSpec(const ASTContext &ctx) const {
return value.getBoolValue() ? NR_Nothrow : NR_Throw;
}
-bool FunctionProtoType::isNothrow(const ASTContext &Ctx,
- bool ResultIfDependent) const {
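+// Conservatively determine whether this function type can throw: CT_Cannot
+// for non-throwing specifications, CT_Can for potentially-throwing ones, and
+// CT_Dependent when that depends on template arguments (a dependent dynamic
+// exception specification or noexcept expression).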
+CanThrowResult FunctionProtoType::canThrow(const ASTContext &Ctx) const {
ExceptionSpecificationType EST = getExceptionSpecType();
assert(EST != EST_Unevaluated && EST != EST_Uninstantiated);
if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
- return true;
+ return CT_Cannot;
- if (EST == EST_Dynamic && ResultIfDependent) {
+ if (EST == EST_Dynamic) {
// A dynamic exception specification is throwing unless every exception
// type is an (unexpanded) pack expansion type.
for (unsigned I = 0, N = NumExceptions; I != N; ++I)
if (!getExceptionType(I)->getAs<PackExpansionType>())
- return false;
- return ResultIfDependent;
+ return CT_Can;
+ return CT_Dependent;
}
if (EST != EST_ComputedNoexcept)
- return false;
+ return CT_Can;
NoexceptResult NR = getNoexceptSpec(Ctx);
if (NR == NR_Dependent)
- return ResultIfDependent;
- return NR == NR_Nothrow;
+ return CT_Dependent;
+ return NR == NR_Nothrow ? CT_Cannot : CT_Can;
}
bool FunctionProtoType::isTemplateVariadic() const {
@@ -2808,7 +2860,7 @@ bool FunctionProtoType::isTemplateVariadic() const {
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
const QualType *ArgTys, unsigned NumParams,
const ExtProtoInfo &epi,
- const ASTContext &Context) {
+ const ASTContext &Context, bool Canonical) {
// We have to be careful not to get ambiguous profile encodings.
// Note that valid type pointers are never ambiguous with anything else.
@@ -2847,7 +2899,7 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
ID.AddPointer(Ex.getAsOpaquePtr());
} else if (epi.ExceptionSpec.Type == EST_ComputedNoexcept &&
epi.ExceptionSpec.NoexceptExpr) {
- epi.ExceptionSpec.NoexceptExpr->Profile(ID, Context, false);
+ epi.ExceptionSpec.NoexceptExpr->Profile(ID, Context, Canonical);
} else if (epi.ExceptionSpec.Type == EST_Uninstantiated ||
epi.ExceptionSpec.Type == EST_Unevaluated) {
ID.AddPointer(epi.ExceptionSpec.SourceDecl->getCanonicalDecl());
@@ -2863,7 +2915,7 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
const ASTContext &Ctx) {
Profile(ID, getReturnType(), param_type_begin(), NumParams, getExtProtoInfo(),
- Ctx);
+ Ctx, isCanonicalUnqualified());
}
QualType TypedefType::desugar() const {
@@ -2992,6 +3044,7 @@ bool AttributedType::isQualifier() const {
case AttributedType::attr_fastcall:
case AttributedType::attr_stdcall:
case AttributedType::attr_thiscall:
+ case AttributedType::attr_regcall:
case AttributedType::attr_pascal:
case AttributedType::attr_swiftcall:
case AttributedType::attr_vectorcall:
@@ -3049,6 +3102,7 @@ bool AttributedType::isCallingConv() const {
case attr_fastcall:
case attr_stdcall:
case attr_thiscall:
+ case attr_regcall:
case attr_swiftcall:
case attr_vectorcall:
case attr_pascal:
@@ -3212,6 +3266,20 @@ void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
isKindOfTypeAsWritten());
}
+void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID,
+ const ObjCTypeParamDecl *OTPDecl,
+ ArrayRef<ObjCProtocolDecl *> protocols) {
+ ID.AddPointer(OTPDecl);
+ ID.AddInteger(protocols.size());
+ for (auto proto : protocols)
+ ID.AddPointer(proto);
+}
+
+void ObjCTypeParamType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getDecl(),
+ llvm::makeArrayRef(qual_begin(), getNumProtocols()));
+}
+
namespace {
/// \brief The cached properties of a type.
@@ -3687,10 +3755,18 @@ bool Type::isObjCARCImplicitlyUnretainedType() const {
}
bool Type::isObjCNSObjectType() const {
- if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
- return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
- return false;
+ const Type *cur = this;
+ while (true) {
+ if (const TypedefType *typedefType = dyn_cast<TypedefType>(cur))
+ return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
+
+ // Single-step desugar until we run out of sugar.
+ QualType next = cur->getLocallyUnqualifiedSingleStepDesugaredType();
+ if (next.getTypePtr() == cur) return false;
+ cur = next.getTypePtr();
+ }
}
+
bool Type::isObjCIndependentClassType() const {
if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
return typedefType->getDecl()->hasAttr<ObjCIndependentClassAttr>();
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index 78947d18f953..7242858f21e6 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -16,10 +16,9 @@
#include "clang/AST/Expr.h"
#include "clang/AST/TypeLocVisitor.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
using namespace clang;
-static const unsigned TypeLocMaxDataAlign = llvm::alignOf<void *>();
+static const unsigned TypeLocMaxDataAlign = alignof(void *);
//===----------------------------------------------------------------------===//
// TypeLoc Implementation
@@ -211,7 +210,7 @@ SourceLocation TypeLoc::getEndLoc() const {
switch (Cur.getTypeLocClass()) {
default:
if (!Last)
- Last = Cur;
+ Last = Cur;
return Last.getLocalSourceRange().getEnd();
case Paren:
case ConstantArray:
@@ -389,6 +388,17 @@ TypeLoc TypeLoc::findExplicitQualifierLoc() const {
return TypeLoc();
}
+void ObjCTypeParamTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setNameLoc(Loc);
+ if (!getNumProtocols()) return;
+
+ setProtocolLAngleLoc(Loc);
+ setProtocolRAngleLoc(Loc);
+ for (unsigned i = 0, e = getNumProtocols(); i != e; ++i)
+ setProtocolLoc(i, Loc);
+}
+
void ObjCObjectTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
setHasBaseTypeAsWritten(true);
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 065a2db09141..cccc90876321 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -194,6 +194,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::DependentName:
case Type::DependentTemplateSpecialization:
case Type::ObjCObject:
+ case Type::ObjCTypeParam:
case Type::ObjCInterface:
case Type::Atomic:
case Type::Pipe:
@@ -724,6 +725,9 @@ void TypePrinter::printFunctionProtoAfter(const FunctionProtoType *T,
case CC_X86_64SysV:
OS << " __attribute__((sysv_abi))";
break;
+ case CC_X86RegCall:
+ OS << " __attribute__((regcall))";
+ break;
case CC_SpirFunction:
case CC_OpenCLKernel:
// Do nothing. These CCs are not available as attributes.
@@ -897,6 +901,10 @@ void TypePrinter::printAtomicAfter(const AtomicType *T, raw_ostream &OS) { }
void TypePrinter::printPipeBefore(const PipeType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
+ if (T->isReadOnly())
+ OS << "read_only ";
+ else
+ OS << "write_only ";
OS << "pipe ";
print(T->getElementType(), OS, StringRef());
spaceBeforePlaceHolder(OS);
@@ -1338,6 +1346,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case AttributedType::attr_pascal: OS << "pascal"; break;
case AttributedType::attr_ms_abi: OS << "ms_abi"; break;
case AttributedType::attr_sysv_abi: OS << "sysv_abi"; break;
+ case AttributedType::attr_regcall: OS << "regcall"; break;
case AttributedType::attr_pcs:
case AttributedType::attr_pcs_vfp: {
OS << "pcs(";
@@ -1368,6 +1377,28 @@ void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T,
void TypePrinter::printObjCInterfaceAfter(const ObjCInterfaceType *T,
raw_ostream &OS) { }
+void TypePrinter::printObjCTypeParamBefore(const ObjCTypeParamType *T,
+ raw_ostream &OS) {
+ OS << T->getDecl()->getName();
+ if (!T->qual_empty()) {
+ bool isFirst = true;
+ OS << '<';
+ for (const auto *I : T->quals()) {
+ if (isFirst)
+ isFirst = false;
+ else
+ OS << ',';
+ OS << I->getName();
+ }
+ OS << '>';
+ }
+
+ spaceBeforePlaceHolder(OS);
+}
+
+void TypePrinter::printObjCTypeParamAfter(const ObjCTypeParamType *T,
+ raw_ostream &OS) { }
+
void TypePrinter::printObjCObjectBefore(const ObjCObjectType *T,
raw_ostream &OS) {
if (T->qual_empty() && T->isUnspecializedAsWritten() &&
diff --git a/lib/AST/VTableBuilder.cpp b/lib/AST/VTableBuilder.cpp
index 640fbf47aeab..e60ae33f2e5c 100644
--- a/lib/AST/VTableBuilder.cpp
+++ b/lib/AST/VTableBuilder.cpp
@@ -777,9 +777,8 @@ public:
typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
VBaseOffsetOffsetsMapTy;
-
- typedef llvm::DenseMap<BaseSubobject, uint64_t>
- AddressPointsMapTy;
+
+ typedef VTableLayout::AddressPointsMapTy AddressPointsMapTy;
typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
@@ -817,7 +816,7 @@ private:
/// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
/// the most derived class.
VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
-
+
/// Components - The components of the vtable being built.
SmallVector<VTableComponent, 64> Components;
@@ -982,6 +981,10 @@ private:
}
public:
+ /// Component indices of the first component of each of the vtables in the
+ /// vtable group.
+ SmallVector<size_t, 4> VTableIndices;
+
ItaniumVTableBuilder(ItaniumVTableContext &VTables,
const CXXRecordDecl *MostDerivedClass,
CharUnits MostDerivedClassOffset,
@@ -1028,20 +1031,8 @@ public:
return MethodVTableIndices.end();
}
- /// getNumVTableComponents - Return the number of components in the vtable
- /// currently built.
- uint64_t getNumVTableComponents() const {
- return Components.size();
- }
+ ArrayRef<VTableComponent> vtable_components() const { return Components; }
- const VTableComponent *vtable_component_begin() const {
- return Components.begin();
- }
-
- const VTableComponent *vtable_component_end() const {
- return Components.end();
- }
-
AddressPointsMapTy::const_iterator address_points_begin() const {
return AddressPoints.begin();
}
@@ -1639,6 +1630,9 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
bool BaseIsVirtualInLayoutClass, CharUnits OffsetInLayoutClass) {
assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
+ unsigned VTableIndex = Components.size();
+ VTableIndices.push_back(VTableIndex);
+
// Add vcall and vbase offsets for this vtable.
VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
Base, BaseIsVirtualInLayoutClass,
@@ -1695,9 +1689,11 @@ void ItaniumVTableBuilder::LayoutPrimaryAndSecondaryVTables(
// Add all address points.
while (true) {
- AddressPoints.insert(std::make_pair(
- BaseSubobject(RD, OffsetInLayoutClass),
- AddressPoint));
+ AddressPoints.insert(
+ std::make_pair(BaseSubobject(RD, OffsetInLayoutClass),
+ VTableLayout::AddressPointLocation{
+ unsigned(VTableIndices.size() - 1),
+ unsigned(AddressPoint - VTableIndex)}));
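+ // The address point is stored as (index of the vtable within the group,
+ // offset of the component within that vtable); dumpLayout below rebuilds
+ // the absolute component index via VTableIndices.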
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
@@ -1901,7 +1897,8 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
for (const auto &AP : AddressPoints) {
const BaseSubobject &Base = AP.first;
- uint64_t Index = AP.second;
+ uint64_t Index =
+ VTableIndices[AP.second.VTableIndex] + AP.second.AddressPointIndex;
AddressPointsByIndex.insert(std::make_pair(Index, Base));
}
@@ -2203,30 +2200,24 @@ void ItaniumVTableBuilder::dumpLayout(raw_ostream &Out) {
}
}
-VTableLayout::VTableLayout(uint64_t NumVTableComponents,
- const VTableComponent *VTableComponents,
- uint64_t NumVTableThunks,
- const VTableThunkTy *VTableThunks,
- const AddressPointsMapTy &AddressPoints,
- bool IsMicrosoftABI)
- : NumVTableComponents(NumVTableComponents),
- VTableComponents(new VTableComponent[NumVTableComponents]),
- NumVTableThunks(NumVTableThunks),
- VTableThunks(new VTableThunkTy[NumVTableThunks]),
- AddressPoints(AddressPoints),
- IsMicrosoftABI(IsMicrosoftABI) {
- std::copy(VTableComponents, VTableComponents+NumVTableComponents,
- this->VTableComponents.get());
- std::copy(VTableThunks, VTableThunks+NumVTableThunks,
- this->VTableThunks.get());
- std::sort(this->VTableThunks.get(),
- this->VTableThunks.get() + NumVTableThunks,
+VTableLayout::VTableLayout(ArrayRef<size_t> VTableIndices,
+ ArrayRef<VTableComponent> VTableComponents,
+ ArrayRef<VTableThunkTy> VTableThunks,
+ const AddressPointsMapTy &AddressPoints)
+ : VTableComponents(VTableComponents), VTableThunks(VTableThunks),
+ AddressPoints(AddressPoints) {
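+ // The common single-vtable case is represented implicitly; only groups
+ // with multiple vtables copy their start indices.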
+ if (VTableIndices.size() <= 1)
+ assert(VTableIndices.size() == 1 && VTableIndices[0] == 0);
+ else
+ this->VTableIndices = OwningArrayRef<size_t>(VTableIndices);
+
+ std::sort(this->VTableThunks.begin(), this->VTableThunks.end(),
[](const VTableLayout::VTableThunkTy &LHS,
const VTableLayout::VTableThunkTy &RHS) {
- assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
- "Different thunks should have unique indices!");
- return LHS.first < RHS.first;
- });
+ assert((LHS.first != RHS.first || LHS.second == RHS.second) &&
+ "Different thunks should have unique indices!");
+ return LHS.first < RHS.first;
+ });
}
VTableLayout::~VTableLayout() { }
@@ -2234,9 +2225,7 @@ VTableLayout::~VTableLayout() { }
ItaniumVTableContext::ItaniumVTableContext(ASTContext &Context)
: VTableContextBase(/*MS=*/false) {}
-ItaniumVTableContext::~ItaniumVTableContext() {
- llvm::DeleteContainerSeconds(VTableLayouts);
-}
+ItaniumVTableContext::~ItaniumVTableContext() {}
uint64_t ItaniumVTableContext::getMethodVTableIndex(GlobalDecl GD) {
MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
@@ -2280,21 +2269,19 @@ ItaniumVTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
return I->second;
}
-static VTableLayout *CreateVTableLayout(const ItaniumVTableBuilder &Builder) {
+static std::unique_ptr<VTableLayout>
+CreateVTableLayout(const ItaniumVTableBuilder &Builder) {
SmallVector<VTableLayout::VTableThunkTy, 1>
VTableThunks(Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
- return new VTableLayout(Builder.getNumVTableComponents(),
- Builder.vtable_component_begin(),
- VTableThunks.size(),
- VTableThunks.data(),
- Builder.getAddressPoints(),
- /*IsMicrosoftABI=*/false);
+ return llvm::make_unique<VTableLayout>(
+ Builder.VTableIndices, Builder.vtable_components(), VTableThunks,
+ Builder.getAddressPoints());
}
void
ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
- const VTableLayout *&Entry = VTableLayouts[RD];
+ std::unique_ptr<const VTableLayout> &Entry = VTableLayouts[RD];
// Check if we've computed this information before.
if (Entry)
@@ -2330,7 +2317,8 @@ ItaniumVTableContext::computeVTableRelatedInformation(const CXXRecordDecl *RD) {
}
}
-VTableLayout *ItaniumVTableContext::createConstructionVTableLayout(
+std::unique_ptr<VTableLayout>
+ItaniumVTableContext::createConstructionVTableLayout(
const CXXRecordDecl *MostDerivedClass, CharUnits MostDerivedClassOffset,
bool MostDerivedClassIsVirtual, const CXXRecordDecl *LayoutClass) {
ItaniumVTableBuilder Builder(*this, MostDerivedClass, MostDerivedClassOffset,
@@ -2538,12 +2526,12 @@ private:
public:
VFTableBuilder(MicrosoftVTableContext &VTables,
- const CXXRecordDecl *MostDerivedClass, const VPtrInfo *Which)
+ const CXXRecordDecl *MostDerivedClass, const VPtrInfo &Which)
: VTables(VTables),
Context(MostDerivedClass->getASTContext()),
MostDerivedClass(MostDerivedClass),
MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)),
- WhichVFPtr(*Which),
+ WhichVFPtr(Which),
Overriders(MostDerivedClass, CharUnits(), MostDerivedClass) {
// Provide the RTTI component if RTTIData is enabled. If the vftable would
// be available externally, we should not provide the RTTI componenent. It
@@ -2570,15 +2558,7 @@ public:
MethodVFTableLocations.end());
}
- uint64_t getNumVTableComponents() const { return Components.size(); }
-
- const VTableComponent *vtable_component_begin() const {
- return Components.begin();
- }
-
- const VTableComponent *vtable_component_end() const {
- return Components.end();
- }
+ ArrayRef<VTableComponent> vtable_components() const { return Components; }
VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
return VTableThunks.begin();
@@ -2931,8 +2911,8 @@ void VFTableBuilder::AddMethods(BaseSubobject Base, unsigned BaseDepth,
// class.
const CXXRecordDecl *NextBase = nullptr, *NextLastVBase = LastVBase;
CharUnits NextBaseOffset;
- if (BaseDepth < WhichVFPtr.PathToBaseWithVPtr.size()) {
- NextBase = WhichVFPtr.PathToBaseWithVPtr[BaseDepth];
+ if (BaseDepth < WhichVFPtr.PathToIntroducingObject.size()) {
+ NextBase = WhichVFPtr.PathToIntroducingObject[BaseDepth];
if (isDirectVBase(NextBase, RD)) {
NextLastVBase = NextBase;
NextBaseOffset = MostDerivedClassLayout.getVBaseClassOffset(NextBase);
@@ -3124,7 +3104,7 @@ static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out,
void VFTableBuilder::dumpLayout(raw_ostream &Out) {
Out << "VFTable for ";
- PrintBasePath(WhichVFPtr.PathToBaseWithVPtr, Out);
+ PrintBasePath(WhichVFPtr.PathToIntroducingObject, Out);
Out << "'";
MostDerivedClass->printQualifiedName(Out);
Out << "' (" << Components.size()
@@ -3278,7 +3258,7 @@ void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
// Base case: this subobject has its own vptr.
if (ForVBTables ? Layout.hasOwnVBPtr() : Layout.hasOwnVFPtr())
- Paths.push_back(new VPtrInfo(RD));
+ Paths.push_back(llvm::make_unique<VPtrInfo>(RD));
// Recursive case: get all the vbtables from our bases and remove anything
// that shares a virtual base.
@@ -3294,14 +3274,14 @@ void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
const VPtrInfoVector &BasePaths =
ForVBTables ? enumerateVBTables(Base) : getVFPtrOffsets(Base);
- for (VPtrInfo *BaseInfo : BasePaths) {
+ for (const std::unique_ptr<VPtrInfo> &BaseInfo : BasePaths) {
// Don't include the path if it goes through a virtual base that we've
// already included.
if (setsIntersect(VBasesSeen, BaseInfo->ContainingVBases))
continue;
// Copy the path and adjust it as necessary.
- VPtrInfo *P = new VPtrInfo(*BaseInfo);
+ auto P = llvm::make_unique<VPtrInfo>(*BaseInfo);
// We mangle Base into the path if the path would've been ambiguous and it
// wasn't already extended with Base.
@@ -3311,10 +3291,10 @@ void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
// Keep track of which vtable the derived class is going to extend with
// new methods or bases. We append to either the vftable of our primary
// base, or the first non-virtual base that has a vbtable.
- if (P->ReusingBase == Base &&
+ if (P->ObjectWithVPtr == Base &&
Base == (ForVBTables ? Layout.getBaseSharingVBPtr()
: Layout.getPrimaryBase()))
- P->ReusingBase = RD;
+ P->ObjectWithVPtr = RD;
// Keep track of the full adjustment from the MDC to this vtable. The
// adjustment is captured by an optional vbase and a non-virtual offset.
@@ -3328,7 +3308,7 @@ void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
if (const CXXRecordDecl *VB = P->getVBaseWithVPtr())
P->FullOffsetInMDC += Layout.getVBaseClassOffset(VB);
- Paths.push_back(P);
+ Paths.push_back(std::move(P));
}
if (B.isVirtual())
@@ -3347,10 +3327,10 @@ void MicrosoftVTableContext::computeVTablePaths(bool ForVBTables,
Changed = rebucketPaths(Paths);
}
-static bool extendPath(VPtrInfo *P) {
- if (P->NextBaseToMangle) {
- P->MangledPath.push_back(P->NextBaseToMangle);
- P->NextBaseToMangle = nullptr;// Prevent the path from being extended twice.
+static bool extendPath(VPtrInfo &P) {
+ if (P.NextBaseToMangle) {
+ P.MangledPath.push_back(P.NextBaseToMangle);
+ P.NextBaseToMangle = nullptr; // Prevent the path from being extended twice.
return true;
}
return false;
@@ -3363,10 +3343,13 @@ static bool rebucketPaths(VPtrInfoVector &Paths) {
// sorted vector to implement a multiset to form the buckets. Note that the
// ordering is based on pointers, but it doesn't change our output order. The
// current algorithm is designed to match MSVC 2012's names.
- VPtrInfoVector PathsSorted(Paths);
+ llvm::SmallVector<std::reference_wrapper<VPtrInfo>, 2> PathsSorted;
+ PathsSorted.reserve(Paths.size());
+ for (auto &P : Paths)
+ PathsSorted.push_back(*P);
std::sort(PathsSorted.begin(), PathsSorted.end(),
- [](const VPtrInfo *LHS, const VPtrInfo *RHS) {
- return LHS->MangledPath < RHS->MangledPath;
+ [](const VPtrInfo &LHS, const VPtrInfo &RHS) {
+ return LHS.MangledPath < RHS.MangledPath;
});
bool Changed = false;
for (size_t I = 0, E = PathsSorted.size(); I != E;) {
@@ -3374,8 +3357,9 @@ static bool rebucketPaths(VPtrInfoVector &Paths) {
size_t BucketStart = I;
do {
++I;
- } while (I != E && PathsSorted[BucketStart]->MangledPath ==
- PathsSorted[I]->MangledPath);
+ } while (I != E &&
+ PathsSorted[BucketStart].get().MangledPath ==
+ PathsSorted[I].get().MangledPath);
// If this bucket has multiple paths, extend them all.
if (I - BucketStart > 1) {
@@ -3387,13 +3371,7 @@ static bool rebucketPaths(VPtrInfoVector &Paths) {
return Changed;
}
-MicrosoftVTableContext::~MicrosoftVTableContext() {
- for (auto &P : VFPtrLocations)
- llvm::DeleteContainerPointers(*P.second);
- llvm::DeleteContainerSeconds(VFPtrLocations);
- llvm::DeleteContainerSeconds(VFTableLayouts);
- llvm::DeleteContainerSeconds(VBaseInfo);
-}
+MicrosoftVTableContext::~MicrosoftVTableContext() {}
namespace {
typedef llvm::SetVector<BaseSubobject, std::vector<BaseSubobject>,
@@ -3401,14 +3379,14 @@ typedef llvm::SetVector<BaseSubobject, std::vector<BaseSubobject>,
}
// This recursive function finds all paths from a subobject centered at
-// (RD, Offset) to the subobject located at BaseWithVPtr.
+// (RD, Offset) to the subobject located at IntroducingObject.
static void findPathsToSubobject(ASTContext &Context,
const ASTRecordLayout &MostDerivedLayout,
const CXXRecordDecl *RD, CharUnits Offset,
- BaseSubobject BaseWithVPtr,
+ BaseSubobject IntroducingObject,
FullPathTy &FullPath,
std::list<FullPathTy> &Paths) {
- if (BaseSubobject(RD, Offset) == BaseWithVPtr) {
+ if (BaseSubobject(RD, Offset) == IntroducingObject) {
Paths.push_back(FullPath);
return;
}
@@ -3422,7 +3400,7 @@ static void findPathsToSubobject(ASTContext &Context,
: Offset + Layout.getBaseClassOffset(Base);
FullPath.insert(BaseSubobject(Base, NewOffset));
findPathsToSubobject(Context, MostDerivedLayout, Base, NewOffset,
- BaseWithVPtr, FullPath, Paths);
+ IntroducingObject, FullPath, Paths);
FullPath.pop_back();
}
}
@@ -3477,7 +3455,8 @@ static CharUnits getOffsetOfFullPath(ASTContext &Context,
// two paths introduce overrides which the other path doesn't contain, issue a
// diagnostic.
static const FullPathTy *selectBestPath(ASTContext &Context,
- const CXXRecordDecl *RD, VPtrInfo *Info,
+ const CXXRecordDecl *RD,
+ const VPtrInfo &Info,
std::list<FullPathTy> &FullPaths) {
// Handle some easy cases first.
if (FullPaths.empty())
@@ -3497,7 +3476,7 @@ static const FullPathTy *selectBestPath(ASTContext &Context,
CharUnits BaseOffset =
getOffsetOfFullPath(Context, TopLevelRD, SpecificPath);
FinalOverriders Overriders(TopLevelRD, CharUnits::Zero(), TopLevelRD);
- for (const CXXMethodDecl *MD : Info->BaseWithVPtr->methods()) {
+ for (const CXXMethodDecl *MD : Info.IntroducingObject->methods()) {
if (!MD->isVirtual())
continue;
FinalOverriders::OverriderInfo OI =
@@ -3552,18 +3531,18 @@ static void computeFullPathsForVFTables(ASTContext &Context,
const ASTRecordLayout &MostDerivedLayout = Context.getASTRecordLayout(RD);
FullPathTy FullPath;
std::list<FullPathTy> FullPaths;
- for (VPtrInfo *Info : Paths) {
+ for (const std::unique_ptr<VPtrInfo> &Info : Paths) {
findPathsToSubobject(
Context, MostDerivedLayout, RD, CharUnits::Zero(),
- BaseSubobject(Info->BaseWithVPtr, Info->FullOffsetInMDC), FullPath,
+ BaseSubobject(Info->IntroducingObject, Info->FullOffsetInMDC), FullPath,
FullPaths);
FullPath.clear();
removeRedundantPaths(FullPaths);
- Info->PathToBaseWithVPtr.clear();
+ Info->PathToIntroducingObject.clear();
if (const FullPathTy *BestPath =
- selectBestPath(Context, RD, Info, FullPaths))
+ selectBestPath(Context, RD, *Info, FullPaths))
for (const BaseSubobject &BSO : *BestPath)
- Info->PathToBaseWithVPtr.push_back(BSO.getBase());
+ Info->PathToIntroducingObject.push_back(BSO.getBase());
FullPaths.clear();
}
}
@@ -3578,22 +3557,24 @@ void MicrosoftVTableContext::computeVTableRelatedInformation(
const VTableLayout::AddressPointsMapTy EmptyAddressPointsMap;
- VPtrInfoVector *VFPtrs = new VPtrInfoVector();
- computeVTablePaths(/*ForVBTables=*/false, RD, *VFPtrs);
- computeFullPathsForVFTables(Context, RD, *VFPtrs);
- VFPtrLocations[RD] = VFPtrs;
+ {
+ VPtrInfoVector VFPtrs;
+ computeVTablePaths(/*ForVBTables=*/false, RD, VFPtrs);
+ computeFullPathsForVFTables(Context, RD, VFPtrs);
+ VFPtrLocations[RD] = std::move(VFPtrs);
+ }
MethodVFTableLocationsTy NewMethodLocations;
- for (const VPtrInfo *VFPtr : *VFPtrs) {
- VFTableBuilder Builder(*this, RD, VFPtr);
+ for (const std::unique_ptr<VPtrInfo> &VFPtr : VFPtrLocations[RD]) {
+ VFTableBuilder Builder(*this, RD, *VFPtr);
VFTableIdTy id(RD, VFPtr->FullOffsetInMDC);
assert(VFTableLayouts.count(id) == 0);
SmallVector<VTableLayout::VTableThunkTy, 1> VTableThunks(
Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
- VFTableLayouts[id] = new VTableLayout(
- Builder.getNumVTableComponents(), Builder.vtable_component_begin(),
- VTableThunks.size(), VTableThunks.data(), EmptyAddressPointsMap, true);
+ VFTableLayouts[id] = llvm::make_unique<VTableLayout>(
+ ArrayRef<size_t>{0}, Builder.vtable_components(), VTableThunks,
+ EmptyAddressPointsMap);
Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
for (const auto &Loc : Builder.vtable_locations()) {
@@ -3670,17 +3651,18 @@ void MicrosoftVTableContext::dumpMethodLocations(
Out.flush();
}
-const VirtualBaseInfo *MicrosoftVTableContext::computeVBTableRelatedInformation(
+const VirtualBaseInfo &MicrosoftVTableContext::computeVBTableRelatedInformation(
const CXXRecordDecl *RD) {
VirtualBaseInfo *VBI;
{
// Get or create a VBI for RD. Don't hold a reference to the DenseMap cell,
// as it may be modified and rehashed under us.
- VirtualBaseInfo *&Entry = VBaseInfo[RD];
+ std::unique_ptr<VirtualBaseInfo> &Entry = VBaseInfo[RD];
if (Entry)
- return Entry;
- Entry = VBI = new VirtualBaseInfo();
+ return *Entry;
+ Entry = llvm::make_unique<VirtualBaseInfo>();
+ VBI = Entry.get();
}
computeVTablePaths(/*ForVBTables=*/true, RD, VBI->VBPtrPaths);
@@ -3690,10 +3672,10 @@ const VirtualBaseInfo *MicrosoftVTableContext::computeVBTableRelatedInformation(
if (const CXXRecordDecl *VBPtrBase = Layout.getBaseSharingVBPtr()) {
// If the Derived class shares the vbptr with a non-virtual base, the shared
// virtual bases come first so that the layout is the same.
- const VirtualBaseInfo *BaseInfo =
+ const VirtualBaseInfo &BaseInfo =
computeVBTableRelatedInformation(VBPtrBase);
- VBI->VBTableIndices.insert(BaseInfo->VBTableIndices.begin(),
- BaseInfo->VBTableIndices.end());
+ VBI->VBTableIndices.insert(BaseInfo.VBTableIndices.begin(),
+ BaseInfo.VBTableIndices.end());
}
// New vbases are added to the end of the vbtable.
@@ -3705,19 +3687,19 @@ const VirtualBaseInfo *MicrosoftVTableContext::computeVBTableRelatedInformation(
VBI->VBTableIndices[CurVBase] = VBTableIndex++;
}
- return VBI;
+ return *VBI;
}
unsigned MicrosoftVTableContext::getVBTableIndex(const CXXRecordDecl *Derived,
const CXXRecordDecl *VBase) {
- const VirtualBaseInfo *VBInfo = computeVBTableRelatedInformation(Derived);
- assert(VBInfo->VBTableIndices.count(VBase));
- return VBInfo->VBTableIndices.find(VBase)->second;
+ const VirtualBaseInfo &VBInfo = computeVBTableRelatedInformation(Derived);
+ assert(VBInfo.VBTableIndices.count(VBase));
+ return VBInfo.VBTableIndices.find(VBase)->second;
}
const VPtrInfoVector &
MicrosoftVTableContext::enumerateVBTables(const CXXRecordDecl *RD) {
- return computeVBTableRelatedInformation(RD)->VBPtrPaths;
+ return computeVBTableRelatedInformation(RD).VBPtrPaths;
}
const VPtrInfoVector &
@@ -3725,7 +3707,7 @@ MicrosoftVTableContext::getVFPtrOffsets(const CXXRecordDecl *RD) {
computeVTableRelatedInformation(RD);
assert(VFPtrLocations.count(RD) && "Couldn't find vfptr locations");
- return *VFPtrLocations[RD];
+ return VFPtrLocations[RD];
}
const VTableLayout &
diff --git a/lib/ASTMatchers/ASTMatchFinder.cpp b/lib/ASTMatchers/ASTMatchFinder.cpp
index 19e5743ea1cb..49b15ee68500 100644
--- a/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -126,6 +126,8 @@ public:
traverse(*Q);
else if (const TypeLoc *T = DynNode.get<TypeLoc>())
traverse(*T);
+ else if (const auto *C = DynNode.get<CXXCtorInitializer>())
+ traverse(*C);
// FIXME: Add other base types after adding tests.
// It's OK to always overwrite the bound nodes, as if there was
@@ -194,6 +196,12 @@ public:
return false;
return traverse(NNS);
}
+ bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit) {
+ if (!CtorInit)
+ return true;
+ ScopedIncrement ScopedDepth(&CurrentDepth);
+ return traverse(*CtorInit);
+ }
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return true; }
@@ -235,6 +243,10 @@ private:
bool baseTraverse(NestedNameSpecifierLoc NNS) {
return VisitorBase::TraverseNestedNameSpecifierLoc(NNS);
}
+ bool baseTraverse(const CXXCtorInitializer &CtorInit) {
+ return VisitorBase::TraverseConstructorInitializer(
+ const_cast<CXXCtorInitializer *>(&CtorInit));
+ }
// Sets 'Matched' to true if 'Matcher' matches 'Node' and:
// 0 < CurrentDepth <= MaxDepth.
@@ -371,6 +383,7 @@ public:
bool TraverseTypeLoc(TypeLoc TypeNode);
bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
+ bool TraverseConstructorInitializer(CXXCtorInitializer *CtorInit);
// Matches children or descendants of 'Node' with 'BaseMatcher'.
bool memoizedMatchesRecursively(const ast_type_traits::DynTypedNode &Node,
@@ -472,6 +485,8 @@ public:
match(*N);
} else if (auto *N = Node.get<TypeLoc>()) {
match(*N);
+ } else if (auto *N = Node.get<CXXCtorInitializer>()) {
+ match(*N);
}
}
@@ -593,6 +608,9 @@ private:
void matchDispatch(const NestedNameSpecifierLoc *Node) {
matchWithoutFilter(*Node, Matchers->NestedNameSpecifierLoc);
}
+ void matchDispatch(const CXXCtorInitializer *Node) {
+ matchWithoutFilter(*Node, Matchers->CtorInit);
+ }
void matchDispatch(const void *) { /* Do nothing. */ }
/// @}
@@ -864,6 +882,17 @@ bool MatchASTVisitor::TraverseNestedNameSpecifierLoc(
RecursiveASTVisitor<MatchASTVisitor>::TraverseNestedNameSpecifierLoc(NNS);
}
+bool MatchASTVisitor::TraverseConstructorInitializer(
+ CXXCtorInitializer *CtorInit) {
+ if (!CtorInit)
+ return true;
+
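+ // Run the registered matchers on the initializer itself before recursing
+ // into its sub-expressions.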
+ match(*CtorInit);
+
+ return RecursiveASTVisitor<MatchASTVisitor>::TraverseConstructorInitializer(
+ CtorInit);
+}
+
class MatchASTConsumer : public ASTConsumer {
public:
MatchASTConsumer(MatchFinder *Finder,
@@ -934,6 +963,12 @@ void MatchFinder::addMatcher(const TypeLocMatcher &NodeMatch,
Matchers.AllCallbacks.insert(Action);
}
+void MatchFinder::addMatcher(const CXXCtorInitializerMatcher &NodeMatch,
+ MatchCallback *Action) {
+ Matchers.CtorInit.emplace_back(NodeMatch, Action);
+ Matchers.AllCallbacks.insert(Action);
+}
+
bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
MatchCallback *Action) {
if (NodeMatch.canConvertTo<Decl>()) {
@@ -954,6 +989,9 @@ bool MatchFinder::addDynamicMatcher(const internal::DynTypedMatcher &NodeMatch,
} else if (NodeMatch.canConvertTo<TypeLoc>()) {
addMatcher(NodeMatch.convertTo<TypeLoc>(), Action);
return true;
+ } else if (NodeMatch.canConvertTo<CXXCtorInitializer>()) {
+ addMatcher(NodeMatch.convertTo<CXXCtorInitializer>(), Action);
+ return true;
}
return false;
}
diff --git a/lib/ASTMatchers/ASTMatchersInternal.cpp b/lib/ASTMatchers/ASTMatchersInternal.cpp
index 107052ef1ded..f0bfbf9e32d8 100644
--- a/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -72,10 +72,10 @@ private:
};
class IdDynMatcher : public DynMatcherInterface {
- public:
+public:
IdDynMatcher(StringRef ID,
- const IntrusiveRefCntPtr<DynMatcherInterface> &InnerMatcher)
- : ID(ID), InnerMatcher(InnerMatcher) {}
+ IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher)
+ : ID(ID), InnerMatcher(std::move(InnerMatcher)) {}
bool dynMatches(const ast_type_traits::DynTypedNode &DynNode,
ASTMatchFinder *Finder,
@@ -85,7 +85,7 @@ class IdDynMatcher : public DynMatcherInterface {
return Result;
}
- private:
+private:
const std::string ID;
const IntrusiveRefCntPtr<DynMatcherInterface> InnerMatcher;
};
@@ -210,8 +210,9 @@ bool DynTypedMatcher::matchesNoKindCheck(
llvm::Optional<DynTypedMatcher> DynTypedMatcher::tryBind(StringRef ID) const {
if (!AllowBind) return llvm::None;
auto Result = *this;
- Result.Implementation = new IdDynMatcher(ID, Result.Implementation);
- return Result;
+ Result.Implementation =
+ new IdDynMatcher(ID, std::move(Result.Implementation));
+ return std::move(Result);
}
bool DynTypedMatcher::canConvertTo(ast_type_traits::ASTNodeKind To) const {
diff --git a/lib/ASTMatchers/Dynamic/Marshallers.h b/lib/ASTMatchers/Dynamic/Marshallers.h
index 7b1a30702633..fb6b349a811c 100644
--- a/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -480,8 +480,8 @@ template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename FromTypes, typename ToTypes>
class AdaptativeOverloadCollector {
public:
- AdaptativeOverloadCollector(StringRef Name,
- std::vector<MatcherDescriptor *> &Out)
+ AdaptativeOverloadCollector(
+ StringRef Name, std::vector<std::unique_ptr<MatcherDescriptor>> &Out)
: Name(Name), Out(Out) {
collect(FromTypes());
}
@@ -499,7 +499,7 @@ private:
inline void collect(FromTypeList);
StringRef Name;
- std::vector<MatcherDescriptor *> &Out;
+ std::vector<std::unique_ptr<MatcherDescriptor>> &Out;
};
/// \brief MatcherDescriptor that wraps multiple "overloads" of the same
@@ -509,8 +509,10 @@ private:
/// more than one overloads match the arguments.
class OverloadedMatcherDescriptor : public MatcherDescriptor {
public:
- OverloadedMatcherDescriptor(ArrayRef<MatcherDescriptor *> Callbacks)
- : Overloads(Callbacks.begin(), Callbacks.end()) {}
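+ // Takes ownership of the descriptors by moving them out of the caller's
+ // buffer.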
+ OverloadedMatcherDescriptor(
+ MutableArrayRef<std::unique_ptr<MatcherDescriptor>> Callbacks)
+ : Overloads(std::make_move_iterator(Callbacks.begin()),
+ std::make_move_iterator(Callbacks.end())) {}
~OverloadedMatcherDescriptor() override {}
@@ -641,36 +643,37 @@ private:
/// \brief 0-arg overload
template <typename ReturnType>
-MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(),
- StringRef MatcherName) {
+std::unique_ptr<MatcherDescriptor>
+makeMatcherAutoMarshall(ReturnType (*Func)(), StringRef MatcherName) {
std::vector<ast_type_traits::ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
- return new FixedArgCountMatcherDescriptor(
+ return llvm::make_unique<FixedArgCountMatcherDescriptor>(
matcherMarshall0<ReturnType>, reinterpret_cast<void (*)()>(Func),
MatcherName, RetTypes, None);
}
/// \brief 1-arg overload
template <typename ReturnType, typename ArgType1>
-MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1),
- StringRef MatcherName) {
+std::unique_ptr<MatcherDescriptor>
+makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1), StringRef MatcherName) {
std::vector<ast_type_traits::ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
ArgKind AK = ArgTypeTraits<ArgType1>::getKind();
- return new FixedArgCountMatcherDescriptor(
+ return llvm::make_unique<FixedArgCountMatcherDescriptor>(
matcherMarshall1<ReturnType, ArgType1>,
reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AK);
}
/// \brief 2-arg overload
template <typename ReturnType, typename ArgType1, typename ArgType2>
-MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
- StringRef MatcherName) {
+std::unique_ptr<MatcherDescriptor>
+makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2),
+ StringRef MatcherName) {
std::vector<ast_type_traits::ASTNodeKind> RetTypes;
BuildReturnTypeVector<ReturnType>::build(RetTypes);
ArgKind AKs[] = { ArgTypeTraits<ArgType1>::getKind(),
ArgTypeTraits<ArgType2>::getKind() };
- return new FixedArgCountMatcherDescriptor(
+ return llvm::make_unique<FixedArgCountMatcherDescriptor>(
matcherMarshall2<ReturnType, ArgType1, ArgType2>,
reinterpret_cast<void (*)()>(Func), MatcherName, RetTypes, AKs);
}
@@ -678,10 +681,10 @@ MatcherDescriptor *makeMatcherAutoMarshall(ReturnType (*Func)(ArgType1, ArgType2
/// \brief Variadic overload.
template <typename ResultT, typename ArgT,
ResultT (*Func)(ArrayRef<const ArgT *>)>
-MatcherDescriptor *makeMatcherAutoMarshall(
+std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
ast_matchers::internal::VariadicFunction<ResultT, ArgT, Func> VarFunc,
StringRef MatcherName) {
- return new VariadicFuncMatcherDescriptor(VarFunc, MatcherName);
+ return llvm::make_unique<VariadicFuncMatcherDescriptor>(VarFunc, MatcherName);
}
/// \brief Overload for VariadicDynCastAllOfMatchers.
@@ -689,24 +692,24 @@ MatcherDescriptor *makeMatcherAutoMarshall(
/// Not strictly necessary, but DynCastAllOfMatcherDescriptor gives us better
/// completion results for that type of matcher.
template <typename BaseT, typename DerivedT>
-MatcherDescriptor *
-makeMatcherAutoMarshall(ast_matchers::internal::VariadicDynCastAllOfMatcher<
- BaseT, DerivedT> VarFunc,
- StringRef MatcherName) {
- return new DynCastAllOfMatcherDescriptor(VarFunc, MatcherName);
+std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
+ ast_matchers::internal::VariadicDynCastAllOfMatcher<BaseT, DerivedT>
+ VarFunc,
+ StringRef MatcherName) {
+ return llvm::make_unique<DynCastAllOfMatcherDescriptor>(VarFunc, MatcherName);
}
/// \brief Argument adaptative overload.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename FromTypes, typename ToTypes>
-MatcherDescriptor *
-makeMatcherAutoMarshall(ast_matchers::internal::ArgumentAdaptingMatcherFunc<
- ArgumentAdapterT, FromTypes, ToTypes>,
- StringRef MatcherName) {
- std::vector<MatcherDescriptor *> Overloads;
+std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
+ ast_matchers::internal::ArgumentAdaptingMatcherFunc<ArgumentAdapterT,
+ FromTypes, ToTypes>,
+ StringRef MatcherName) {
+ std::vector<std::unique_ptr<MatcherDescriptor>> Overloads;
AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes, ToTypes>(MatcherName,
Overloads);
- return new OverloadedMatcherDescriptor(Overloads);
+ return llvm::make_unique<OverloadedMatcherDescriptor>(Overloads);
}
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
@@ -721,12 +724,12 @@ inline void AdaptativeOverloadCollector<ArgumentAdapterT, FromTypes,
/// \brief Variadic operator overload.
template <unsigned MinCount, unsigned MaxCount>
-MatcherDescriptor *
-makeMatcherAutoMarshall(ast_matchers::internal::VariadicOperatorMatcherFunc<
- MinCount, MaxCount> Func,
- StringRef MatcherName) {
- return new VariadicOperatorMatcherDescriptor(MinCount, MaxCount, Func.Op,
- MatcherName);
+std::unique_ptr<MatcherDescriptor> makeMatcherAutoMarshall(
+ ast_matchers::internal::VariadicOperatorMatcherFunc<MinCount, MaxCount>
+ Func,
+ StringRef MatcherName) {
+ return llvm::make_unique<VariadicOperatorMatcherDescriptor>(
+ MinCount, MaxCount, Func.Op, MatcherName);
}
} // namespace internal
diff --git a/lib/ASTMatchers/Dynamic/Parser.cpp b/lib/ASTMatchers/Dynamic/Parser.cpp
index cf9dab6dc7db..ce8d0a9a0206 100644
--- a/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -16,7 +16,6 @@
#include "clang/ASTMatchers/Dynamic/Registry.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/Twine.h"
#include "llvm/Support/ManagedStatic.h"
#include <string>
#include <vector>
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index a8d4b88d8580..d1cab80c1a53 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -31,7 +31,7 @@ namespace {
using internal::MatcherDescriptor;
-typedef llvm::StringMap<const MatcherDescriptor *> ConstructorMap;
+typedef llvm::StringMap<std::unique_ptr<const MatcherDescriptor>> ConstructorMap;
class RegistryMaps {
public:
RegistryMaps();
@@ -40,14 +40,16 @@ public:
const ConstructorMap &constructors() const { return Constructors; }
private:
- void registerMatcher(StringRef MatcherName, MatcherDescriptor *Callback);
+ void registerMatcher(StringRef MatcherName,
+ std::unique_ptr<MatcherDescriptor> Callback);
+
ConstructorMap Constructors;
};
-void RegistryMaps::registerMatcher(StringRef MatcherName,
- MatcherDescriptor *Callback) {
+void RegistryMaps::registerMatcher(
+ StringRef MatcherName, std::unique_ptr<MatcherDescriptor> Callback) {
assert(Constructors.find(MatcherName) == Constructors.end());
- Constructors[MatcherName] = Callback;
+ Constructors[MatcherName] = std::move(Callback);
}
#define REGISTER_MATCHER(name) \
@@ -55,19 +57,19 @@ void RegistryMaps::registerMatcher(StringRef MatcherName,
::clang::ast_matchers::name, #name));
#define SPECIFIC_MATCHER_OVERLOAD(name, Id) \
- static_cast< ::clang::ast_matchers::name##_Type##Id>( \
+ static_cast<::clang::ast_matchers::name##_Type##Id>( \
::clang::ast_matchers::name)
#define REGISTER_OVERLOADED_2(name) \
do { \
- MatcherDescriptor *Callbacks[] = { \
- internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 0), \
- #name), \
- internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 1), \
- #name) \
- }; \
- registerMatcher(#name, \
- new internal::OverloadedMatcherDescriptor(Callbacks)); \
+ std::unique_ptr<MatcherDescriptor> Callbacks[] = { \
+ internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 0), \
+ #name), \
+ internal::makeMatcherAutoMarshall(SPECIFIC_MATCHER_OVERLOAD(name, 1), \
+ #name)}; \
+ registerMatcher( \
+ #name, \
+ llvm::make_unique<internal::OverloadedMatcherDescriptor>(Callbacks)); \
} while (0)
/// \brief Generate a registry map with all the known matchers.
@@ -198,6 +200,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAncestor);
REGISTER_MATCHER(hasAnyArgument);
REGISTER_MATCHER(hasAnyConstructorInitializer);
+ REGISTER_MATCHER(hasAnyDeclaration);
REGISTER_MATCHER(hasAnyName);
REGISTER_MATCHER(hasAnyParameter);
REGISTER_MATCHER(hasAnySubstatement);
@@ -225,9 +228,11 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasEitherOperand);
REGISTER_MATCHER(hasElementType);
REGISTER_MATCHER(hasElse);
+ REGISTER_MATCHER(hasExternalFormalLinkage);
REGISTER_MATCHER(hasFalseExpression);
REGISTER_MATCHER(hasGlobalStorage);
REGISTER_MATCHER(hasImplicitDestinationType);
+ REGISTER_MATCHER(hasInClassInitializer);
REGISTER_MATCHER(hasIncrement);
REGISTER_MATCHER(hasIndex);
REGISTER_MATCHER(hasInitializer);
@@ -248,6 +253,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasQualifier);
REGISTER_MATCHER(hasRangeInit);
REGISTER_MATCHER(hasReceiverType);
+ REGISTER_MATCHER(hasReplacementType);
REGISTER_MATCHER(hasReturnValue);
REGISTER_MATCHER(hasRHS);
REGISTER_MATCHER(hasSelector);
@@ -265,6 +271,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasTypeLoc);
REGISTER_MATCHER(hasUnaryOperand);
REGISTER_MATCHER(hasUnarySelector);
+ REGISTER_MATCHER(hasUnderlyingDecl);
+ REGISTER_MATCHER(hasUnqualifiedDesugaredType);
REGISTER_MATCHER(hasValueType);
REGISTER_MATCHER(ifStmt);
REGISTER_MATCHER(ignoringImplicit);
@@ -391,8 +399,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(switchCase);
REGISTER_MATCHER(switchStmt);
REGISTER_MATCHER(templateArgument);
+ REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateArgumentCountIs);
REGISTER_MATCHER(templateSpecializationType);
+ REGISTER_MATCHER(templateTypeParmDecl);
REGISTER_MATCHER(templateTypeParmType);
REGISTER_MATCHER(throughUsingDecl);
REGISTER_MATCHER(to);
@@ -421,9 +431,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(withInitializer);
}
-RegistryMaps::~RegistryMaps() {
- llvm::DeleteContainerSeconds(Constructors);
-}
+RegistryMaps::~RegistryMaps() {}
static llvm::ManagedStatic<RegistryMaps> RegistryData;
@@ -431,11 +439,10 @@ static llvm::ManagedStatic<RegistryMaps> RegistryData;
// static
llvm::Optional<MatcherCtor> Registry::lookupMatcherCtor(StringRef MatcherName) {
- ConstructorMap::const_iterator it =
- RegistryData->constructors().find(MatcherName);
+ auto it = RegistryData->constructors().find(MatcherName);
return it == RegistryData->constructors().end()
? llvm::Optional<MatcherCtor>()
- : it->second;
+ : it->second.get();
}
namespace {
@@ -494,12 +501,12 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
// Search the registry for acceptable matchers.
for (const auto &M : RegistryData->constructors()) {
- const auto *Matcher = M.getValue();
+ const MatcherDescriptor& Matcher = *M.getValue();
StringRef Name = M.getKey();
std::set<ASTNodeKind> RetKinds;
- unsigned NumArgs = Matcher->isVariadic() ? 1 : Matcher->getNumArgs();
- bool IsPolymorphic = Matcher->isPolymorphic();
+ unsigned NumArgs = Matcher.isVariadic() ? 1 : Matcher.getNumArgs();
+ bool IsPolymorphic = Matcher.isPolymorphic();
std::vector<std::vector<ArgKind>> ArgsKinds(NumArgs);
unsigned MaxSpecificity = 0;
for (const ArgKind& Kind : AcceptedTypes) {
@@ -507,13 +514,13 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
continue;
unsigned Specificity;
ASTNodeKind LeastDerivedKind;
- if (Matcher->isConvertibleTo(Kind.getMatcherKind(), &Specificity,
- &LeastDerivedKind)) {
+ if (Matcher.isConvertibleTo(Kind.getMatcherKind(), &Specificity,
+ &LeastDerivedKind)) {
if (MaxSpecificity < Specificity)
MaxSpecificity = Specificity;
RetKinds.insert(LeastDerivedKind);
for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
- Matcher->getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
+ Matcher.getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
if (IsPolymorphic)
break;
}
@@ -549,7 +556,7 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
}
}
}
- if (Matcher->isVariadic())
+ if (Matcher.isVariadic())
OS << "...";
OS << ")";
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 6bbe8f86d48e..6b58916162f6 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -81,9 +81,7 @@ AnalysisDeclContextManager::AnalysisDeclContextManager(bool useUnoptimizedCFG,
cfgBuildOptions.AddCXXNewAllocator = addCXXNewAllocator;
}
-void AnalysisDeclContextManager::clear() {
- llvm::DeleteContainerSeconds(Contexts);
-}
+void AnalysisDeclContextManager::clear() { Contexts.clear(); }
static BodyFarm &getBodyFarm(ASTContext &C, CodeInjector *injector = nullptr) {
static BodyFarm *BF = new BodyFarm(C, injector);
@@ -307,10 +305,10 @@ AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
D = FD;
}
- AnalysisDeclContext *&AC = Contexts[D];
+ std::unique_ptr<AnalysisDeclContext> &AC = Contexts[D];
if (!AC)
- AC = new AnalysisDeclContext(this, D, cfgBuildOptions);
- return AC;
+ AC = llvm::make_unique<AnalysisDeclContext>(this, D, cfgBuildOptions);
+ return AC.get();
}
const StackFrameContext *
@@ -606,9 +604,7 @@ AnalysisDeclContext::~AnalysisDeclContext() {
}
}
-AnalysisDeclContextManager::~AnalysisDeclContextManager() {
- llvm::DeleteContainerSeconds(Contexts);
-}
+AnalysisDeclContextManager::~AnalysisDeclContextManager() {}
LocationContext::~LocationContext() {}
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index a67f0910e15a..bf3cc05cdb6e 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -1164,7 +1164,8 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
/// \brief Retrieve the type of the temporary object whose lifetime was
/// extended by a local reference with the given initializer.
static QualType getReferenceInitTemporaryType(ASTContext &Context,
- const Expr *Init) {
+ const Expr *Init,
+ bool *FoundMTE = nullptr) {
while (true) {
// Skip parentheses.
Init = Init->IgnoreParens();
@@ -1179,6 +1180,8 @@ static QualType getReferenceInitTemporaryType(ASTContext &Context,
if (const MaterializeTemporaryExpr *MTE
= dyn_cast<MaterializeTemporaryExpr>(Init)) {
Init = MTE->GetTemporaryExpr();
+ if (FoundMTE)
+ *FoundMTE = true;
continue;
}
@@ -1370,13 +1373,12 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
const Expr *Init = VD->getInit();
if (!Init)
return Scope;
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init))
- Init = EWC->getSubExpr();
- if (!isa<MaterializeTemporaryExpr>(Init))
- return Scope;
// Lifetime-extending a temporary.
- QT = getReferenceInitTemporaryType(*Context, Init);
+ bool FoundMTE = false;
+ QT = getReferenceInitTemporaryType(*Context, Init, &FoundMTE);
+ if (!FoundMTE)
+ return Scope;
}
// Check for constant size array. Set type to array element type.
@@ -2050,8 +2052,7 @@ CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
E = DS->decl_rend();
I != E; ++I) {
// Get the alignment of the new DeclStmt, padding out to >=8 bytes.
- unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
- ? 8 : llvm::AlignOf<DeclStmt>::Alignment;
+ unsigned A = alignof(DeclStmt) < 8 ? 8 : alignof(DeclStmt);
// Allocate the DeclStmt using the BumpPtrAllocator. It will get
// automatically freed with the CFG.
@@ -2983,20 +2984,19 @@ CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) {
return nullptr;
}
- if (!KnownVal.isFalse()) {
- // Add an intermediate block between the BodyBlock and the
- // ExitConditionBlock to represent the "loop back" transition. Create an
- // empty block to represent the transition block for looping back to the
- // head of the loop.
- // FIXME: Can we do this more efficiently without adding another block?
- Block = nullptr;
- Succ = BodyBlock;
- CFGBlock *LoopBackBlock = createBlock();
- LoopBackBlock->setLoopTarget(D);
+ // Add an intermediate block between the BodyBlock and the
+ // ExitConditionBlock to represent the "loop back" transition. Create an
+ // empty block to represent the transition block for looping back to the
+ // head of the loop.
+ // FIXME: Can we do this more efficiently without adding another block?
+ Block = nullptr;
+ Succ = BodyBlock;
+ CFGBlock *LoopBackBlock = createBlock();
+ LoopBackBlock->setLoopTarget(D);
+ if (!KnownVal.isFalse())
// Add the loop body entry as a successor to the condition.
addSuccessor(ExitConditionBlock, LoopBackBlock);
- }
else
addSuccessor(ExitConditionBlock, nullptr);
}
@@ -3583,11 +3583,13 @@ CFGBlock *CFGBuilder::VisitCXXDeleteExpr(CXXDeleteExpr *DE,
autoCreateBlock();
appendStmt(Block, DE);
QualType DTy = DE->getDestroyedType();
- DTy = DTy.getNonReferenceType();
- CXXRecordDecl *RD = Context->getBaseElementType(DTy)->getAsCXXRecordDecl();
- if (RD) {
- if (RD->isCompleteDefinition() && !RD->hasTrivialDestructor())
- appendDeleteDtor(Block, RD, DE);
+ if (!DTy.isNull()) {
+ DTy = DTy.getNonReferenceType();
+ CXXRecordDecl *RD = Context->getBaseElementType(DTy)->getAsCXXRecordDecl();
+ if (RD) {
+ if (RD->isCompleteDefinition() && !RD->hasTrivialDestructor())
+ appendDeleteDtor(Block, RD, DE);
+ }
}
return VisitChildren(DE);
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 1df093d85098..fdc9e6cee8e1 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -9,12 +9,14 @@ add_clang_library(clangAnalysis
CFGReachabilityAnalysis.cpp
CFGStmtMap.cpp
CallGraph.cpp
+ CloneDetection.cpp
CocoaConventions.cpp
Consumed.cpp
CodeInjector.cpp
Dominators.cpp
FormatString.cpp
LiveVariables.cpp
+ OSLog.cpp
ObjCNoReturn.cpp
PostOrderCFGView.cpp
PrintfFormatString.cpp
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index 9d522fe7c6c5..8c126b09d057 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -55,7 +55,7 @@ public:
void addCalledDecl(Decl *D) {
if (G->includeInGraph(D)) {
CallGraphNode *CalleeNode = G->getOrInsertNode(D);
- CallerNode->addCallee(CalleeNode, G);
+ CallerNode->addCallee(CalleeNode);
}
}
@@ -104,9 +104,7 @@ CallGraph::CallGraph() {
Root = getOrInsertNode(nullptr);
}
-CallGraph::~CallGraph() {
- llvm::DeleteContainerSeconds(FunctionMap);
-}
+CallGraph::~CallGraph() {}
bool CallGraph::includeInGraph(const Decl *D) {
assert(D);
@@ -142,22 +140,22 @@ void CallGraph::addNodeForDecl(Decl* D, bool IsGlobal) {
CallGraphNode *CallGraph::getNode(const Decl *F) const {
FunctionMapTy::const_iterator I = FunctionMap.find(F);
if (I == FunctionMap.end()) return nullptr;
- return I->second;
+ return I->second.get();
}
CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
if (F && !isa<ObjCMethodDecl>(F))
F = F->getCanonicalDecl();
- CallGraphNode *&Node = FunctionMap[F];
+ std::unique_ptr<CallGraphNode> &Node = FunctionMap[F];
if (Node)
- return Node;
+ return Node.get();
- Node = new CallGraphNode(F);
+ Node = llvm::make_unique<CallGraphNode>(F);
// Make Root node a parent of all functions to make sure all are reachable.
if (F)
- Root->addCallee(Node, this);
- return Node;
+ Root->addCallee(Node.get());
+ return Node.get();
}
void CallGraph::print(raw_ostream &OS) const {
diff --git a/lib/Analysis/CloneDetection.cpp b/lib/Analysis/CloneDetection.cpp
new file mode 100644
index 000000000000..e761738214c6
--- /dev/null
+++ b/lib/Analysis/CloneDetection.cpp
@@ -0,0 +1,894 @@
+//===--- CloneDetection.cpp - Finds code clones in an AST -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file implements classes for searching and analyzing source code clones.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CloneDetection.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/MD5.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+StmtSequence::StmtSequence(const CompoundStmt *Stmt, ASTContext &Context,
+ unsigned StartIndex, unsigned EndIndex)
+ : S(Stmt), Context(&Context), StartIndex(StartIndex), EndIndex(EndIndex) {
+ assert(Stmt && "Stmt must not be a nullptr");
+ assert(StartIndex < EndIndex && "Given array should not be empty");
+ assert(EndIndex <= Stmt->size() && "Given array too big for this Stmt");
+}
+
+StmtSequence::StmtSequence(const Stmt *Stmt, ASTContext &Context)
+ : S(Stmt), Context(&Context), StartIndex(0), EndIndex(0) {}
+
+StmtSequence::StmtSequence()
+ : S(nullptr), Context(nullptr), StartIndex(0), EndIndex(0) {}
+
+bool StmtSequence::contains(const StmtSequence &Other) const {
+ // If both sequences reside in different translation units, they can never
+ // contain each other.
+ if (Context != Other.Context)
+ return false;
+
+ const SourceManager &SM = Context->getSourceManager();
+
+ // Otherwise check if the start and end locations of the current sequence
+ // surround the other sequence.
+ bool StartIsInBounds =
+ SM.isBeforeInTranslationUnit(getStartLoc(), Other.getStartLoc()) ||
+ getStartLoc() == Other.getStartLoc();
+ if (!StartIsInBounds)
+ return false;
+
+ bool EndIsInBounds =
+ SM.isBeforeInTranslationUnit(Other.getEndLoc(), getEndLoc()) ||
+ Other.getEndLoc() == getEndLoc();
+ return EndIsInBounds;
+}
+
+StmtSequence::iterator StmtSequence::begin() const {
+ if (!holdsSequence()) {
+ return &S;
+ }
+ auto CS = cast<CompoundStmt>(S);
+ return CS->body_begin() + StartIndex;
+}
+
+StmtSequence::iterator StmtSequence::end() const {
+ if (!holdsSequence()) {
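+    // A single statement is exposed as a one-element range: the address of
+    // the S member is the begin iterator, so one past it is the end.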
+ return reinterpret_cast<StmtSequence::iterator>(&S) + 1;
+ }
+ auto CS = cast<CompoundStmt>(S);
+ return CS->body_begin() + EndIndex;
+}
+
+SourceLocation StmtSequence::getStartLoc() const {
+ return front()->getLocStart();
+}
+
+SourceLocation StmtSequence::getEndLoc() const { return back()->getLocEnd(); }
+
+SourceRange StmtSequence::getSourceRange() const {
+ return SourceRange(getStartLoc(), getEndLoc());
+}
+
+namespace {
+
+/// \brief Analyzes the pattern of the referenced variables in a statement.
+class VariablePattern {
+
+  /// \brief Describes an occurrence of a variable reference in a statement.
+ struct VariableOccurence {
+ /// The index of the associated VarDecl in the Variables vector.
+ size_t KindID;
+ /// The statement in the code where the variable was referenced.
+ const Stmt *Mention;
+
+ VariableOccurence(size_t KindID, const Stmt *Mention)
+ : KindID(KindID), Mention(Mention) {}
+ };
+
+  /// All occurrences of referenced variables in the order of appearance.
+ std::vector<VariableOccurence> Occurences;
+ /// List of referenced variables in the order of appearance.
+ /// Every item in this list is unique.
+ std::vector<const VarDecl *> Variables;
+
+  /// \brief Adds a new variable reference to this pattern.
+  /// \param VarDecl The declaration of the variable that is referenced.
+  /// \param Mention The statement where this variable is referenced.
+ void addVariableOccurence(const VarDecl *VarDecl, const Stmt *Mention) {
+ // First check if we already reference this variable
+ for (size_t KindIndex = 0; KindIndex < Variables.size(); ++KindIndex) {
+ if (Variables[KindIndex] == VarDecl) {
+        // If yes, add a new occurrence that points to the existing entry in
+ // the Variables vector.
+ Occurences.emplace_back(KindIndex, Mention);
+ return;
+ }
+ }
+ // If this variable wasn't already referenced, add it to the list of
+    // referenced variables and add an occurrence that points to this new entry.
+ Occurences.emplace_back(Variables.size(), Mention);
+ Variables.push_back(VarDecl);
+ }
+
+ /// \brief Adds each referenced variable from the given statement.
+ void addVariables(const Stmt *S) {
+ // Sometimes we get a nullptr (such as from IfStmts which often have nullptr
+ // children). We skip such statements as they don't reference any
+ // variables.
+ if (!S)
+ return;
+
+ // Check if S is a reference to a variable. If yes, add it to the pattern.
+ if (auto D = dyn_cast<DeclRefExpr>(S)) {
+ if (auto VD = dyn_cast<VarDecl>(D->getDecl()->getCanonicalDecl()))
+ addVariableOccurence(VD, D);
+ }
+
+ // Recursively check all children of the given statement.
+ for (const Stmt *Child : S->children()) {
+ addVariables(Child);
+ }
+ }
+
+public:
+  /// \brief Creates a VariablePattern object with information about the given
+ /// StmtSequence.
+ VariablePattern(const StmtSequence &Sequence) {
+ for (const Stmt *S : Sequence)
+ addVariables(S);
+ }
+
+ /// \brief Counts the differences between this pattern and the given one.
+ /// \param Other The given VariablePattern to compare with.
+ /// \param FirstMismatch Output parameter that will be filled with information
+ /// about the first difference between the two patterns. This parameter
+ /// can be a nullptr, in which case it will be ignored.
+  /// \return Returns the number of differences between this pattern and the
+  /// given VariablePattern.
+ ///
+ /// For example, the following statements all have the same pattern and this
+ /// function would return zero:
+ ///
+ /// if (a < b) return a; return b;
+ /// if (x < y) return x; return y;
+ /// if (u2 < u1) return u2; return u1;
+ ///
+ /// But the following statement has a different pattern (note the changed
+ /// variables in the return statements) and would have two differences when
+ /// compared with one of the statements above.
+ ///
+ /// if (a < b) return b; return a;
+ ///
+ /// This function should only be called if the related statements of the given
+  /// pattern and the statements of this object are clones of each other.
+ unsigned countPatternDifferences(
+ const VariablePattern &Other,
+ CloneDetector::SuspiciousClonePair *FirstMismatch = nullptr) {
+ unsigned NumberOfDifferences = 0;
+
+ assert(Other.Occurences.size() == Occurences.size());
+ for (unsigned i = 0; i < Occurences.size(); ++i) {
+ auto ThisOccurence = Occurences[i];
+ auto OtherOccurence = Other.Occurences[i];
+ if (ThisOccurence.KindID == OtherOccurence.KindID)
+ continue;
+
+ ++NumberOfDifferences;
+
+ // If FirstMismatch is not a nullptr, we need to store information about
+ // the first difference between the two patterns.
+ if (FirstMismatch == nullptr)
+ continue;
+
+ // Only proceed if we just found the first difference as we only store
+ // information about the first difference.
+ if (NumberOfDifferences != 1)
+ continue;
+
+ const VarDecl *FirstSuggestion = nullptr;
+ // If there is a variable available in the list of referenced variables
+ // which wouldn't break the pattern if it is used in place of the
+ // current variable, we provide this variable as the suggested fix.
+ if (OtherOccurence.KindID < Variables.size())
+ FirstSuggestion = Variables[OtherOccurence.KindID];
+
+ // Store information about the first clone.
+ FirstMismatch->FirstCloneInfo =
+ CloneDetector::SuspiciousClonePair::SuspiciousCloneInfo(
+ Variables[ThisOccurence.KindID], ThisOccurence.Mention,
+ FirstSuggestion);
+
+ // Same as above but with the other clone. We do this for both clones as
+ // we don't know which clone is the one containing the unintended
+ // pattern error.
+ const VarDecl *SecondSuggestion = nullptr;
+ if (ThisOccurence.KindID < Other.Variables.size())
+ SecondSuggestion = Other.Variables[ThisOccurence.KindID];
+
+ // Store information about the second clone.
+ FirstMismatch->SecondCloneInfo =
+ CloneDetector::SuspiciousClonePair::SuspiciousCloneInfo(
+ Other.Variables[OtherOccurence.KindID], OtherOccurence.Mention,
+ SecondSuggestion);
+
+ // SuspiciousClonePair guarantees that the first clone always has a
+ // suggested variable associated with it. As we know that one of the two
+      // clones in the pair always has a suggestion, we swap the two clones
+      // in case the first clone has no suggested variable, which means that
+ // the second clone has a suggested variable and should be first.
+ if (!FirstMismatch->FirstCloneInfo.Suggestion)
+ std::swap(FirstMismatch->FirstCloneInfo,
+ FirstMismatch->SecondCloneInfo);
+
+ // This ensures that we always have at least one suggestion in a pair.
+ assert(FirstMismatch->FirstCloneInfo.Suggestion);
+ }
+
+ return NumberOfDifferences;
+ }
+};
+} // end anonymous namespace
+
+/// \brief Prints the name of the macro expansion that contains the given
+/// SourceLocation into the given raw_string_ostream.
+static void printMacroName(llvm::raw_string_ostream &MacroStack,
+ ASTContext &Context, SourceLocation Loc) {
+ MacroStack << Lexer::getImmediateMacroName(Loc, Context.getSourceManager(),
+ Context.getLangOpts());
+
+  // Add a space at the end as padding to prevent macro names from
+  // concatenating with the names of other macros.
+ MacroStack << " ";
+}
+
+/// \brief Returns a string that represents all macro expansions that
+/// expanded into the given SourceLocation.
+///
+/// If 'getMacroStack(A) == getMacroStack(B)' is true, then the SourceLocations
+/// A and B are expanded from the same macros in the same order.
+static std::string getMacroStack(SourceLocation Loc, ASTContext &Context) {
+ std::string MacroStack;
+ llvm::raw_string_ostream MacroStackStream(MacroStack);
+ SourceManager &SM = Context.getSourceManager();
+
+ // Iterate over all macros that expanded into the given SourceLocation.
+ while (Loc.isMacroID()) {
+ // Add the macro name to the stream.
+ printMacroName(MacroStackStream, Context, Loc);
+ Loc = SM.getImmediateMacroCallerLoc(Loc);
+ }
+ MacroStackStream.flush();
+ return MacroStack;
+}
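+
+// For example, given the (hypothetical) macros
+//   #define ONE 1
+//   #define TWO ONE + ONE
+// a SourceLocation inside ONE's expansion within TWO yields the stack
+// "ONE TWO ": the innermost macro comes first, each name padded by a space.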
+
+namespace {
+/// \brief Collects the data of a single Stmt.
+///
+/// This class defines what a code clone is: if it collects the same data for
+/// two statements, then those two statements are considered to be clones of
+/// each other.
+///
+/// All collected data is forwarded to the given data consumer of the type T.
+/// The data consumer class needs to provide a member method with the signature:
+/// update(StringRef Str)
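+/// (Both llvm::MD5 and the FoldingSetNodeIDWrapper defined later in this file
+/// satisfy this interface.)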
+template <typename T>
+class StmtDataCollector : public ConstStmtVisitor<StmtDataCollector<T>> {
+
+ ASTContext &Context;
+ /// \brief The data sink to which all data is forwarded.
+ T &DataConsumer;
+
+public:
+ /// \brief Collects data of the given Stmt.
+ /// \param S The given statement.
+ /// \param Context The ASTContext of S.
+ /// \param DataConsumer The data sink to which all data is forwarded.
+ StmtDataCollector(const Stmt *S, ASTContext &Context, T &DataConsumer)
+ : Context(Context), DataConsumer(DataConsumer) {
+ this->Visit(S);
+ }
+
+  // Below are utility methods for forwarding different kinds of data to the
+  // consumer.
+
+ void addData(CloneDetector::DataPiece Integer) {
+ DataConsumer.update(
+ StringRef(reinterpret_cast<char *>(&Integer), sizeof(Integer)));
+ }
+
+ void addData(llvm::StringRef Str) { DataConsumer.update(Str); }
+
+ void addData(const QualType &QT) { addData(QT.getAsString()); }
+
+// The functions below collect the class specific data of each Stmt subclass.
+
+// Utility macro for defining a visit method for a given class. This method
+// calls back to the ConstStmtVisitor to visit all parent classes.
+#define DEF_ADD_DATA(CLASS, CODE) \
+ void Visit##CLASS(const CLASS *S) { \
+ CODE; \
+ ConstStmtVisitor<StmtDataCollector>::Visit##CLASS(S); \
+ }
+
+ DEF_ADD_DATA(Stmt, {
+ addData(S->getStmtClass());
+    // Hashing the macro stack ensures that code expanded from a macro is not
+    // considered identical to the same code written directly or expanded from
+    // different macros.
+ addData(getMacroStack(S->getLocStart(), Context));
+ addData(getMacroStack(S->getLocEnd(), Context));
+ })
+ DEF_ADD_DATA(Expr, { addData(S->getType()); })
+
+ //--- Builtin functionality ----------------------------------------------//
+ DEF_ADD_DATA(ArrayTypeTraitExpr, { addData(S->getTrait()); })
+ DEF_ADD_DATA(ExpressionTraitExpr, { addData(S->getTrait()); })
+ DEF_ADD_DATA(PredefinedExpr, { addData(S->getIdentType()); })
+ DEF_ADD_DATA(TypeTraitExpr, {
+ addData(S->getTrait());
+ for (unsigned i = 0; i < S->getNumArgs(); ++i)
+ addData(S->getArg(i)->getType());
+ })
+
+ //--- Calls --------------------------------------------------------------//
+ DEF_ADD_DATA(CallExpr, {
+ // Function pointers don't have a callee and we just skip hashing it.
+ if (const FunctionDecl *D = S->getDirectCallee()) {
+ // If the function is a template specialization, we also need to handle
+ // the template arguments as they are not included in the qualified name.
+ if (auto Args = D->getTemplateSpecializationArgs()) {
+ std::string ArgString;
+
+ // Print all template arguments into ArgString
+ llvm::raw_string_ostream OS(ArgString);
+ for (unsigned i = 0; i < Args->size(); ++i) {
+ Args->get(i).print(Context.getLangOpts(), OS);
+ // Add a padding character so that 'foo<X, XX>()' != 'foo<XX, X>()'.
+ OS << '\n';
+ }
+ OS.flush();
+
+ addData(ArgString);
+ }
+ addData(D->getQualifiedNameAsString());
+ }
+ })
+
+ //--- Exceptions ---------------------------------------------------------//
+ DEF_ADD_DATA(CXXCatchStmt, { addData(S->getCaughtType()); })
+
+ //--- C++ OOP Stmts ------------------------------------------------------//
+ DEF_ADD_DATA(CXXDeleteExpr, {
+ addData(S->isArrayFormAsWritten());
+ addData(S->isGlobalDelete());
+ })
+
+ //--- Casts --------------------------------------------------------------//
+ DEF_ADD_DATA(ObjCBridgedCastExpr, { addData(S->getBridgeKind()); })
+
+ //--- Miscellaneous Exprs ------------------------------------------------//
+ DEF_ADD_DATA(BinaryOperator, { addData(S->getOpcode()); })
+ DEF_ADD_DATA(UnaryOperator, { addData(S->getOpcode()); })
+
+ //--- Control flow -------------------------------------------------------//
+ DEF_ADD_DATA(GotoStmt, { addData(S->getLabel()->getName()); })
+ DEF_ADD_DATA(IndirectGotoStmt, {
+ if (S->getConstantTarget())
+ addData(S->getConstantTarget()->getName());
+ })
+ DEF_ADD_DATA(LabelStmt, { addData(S->getDecl()->getName()); })
+ DEF_ADD_DATA(MSDependentExistsStmt, { addData(S->isIfExists()); })
+ DEF_ADD_DATA(AddrLabelExpr, { addData(S->getLabel()->getName()); })
+
+ //--- Objective-C --------------------------------------------------------//
+ DEF_ADD_DATA(ObjCIndirectCopyRestoreExpr, { addData(S->shouldCopy()); })
+ DEF_ADD_DATA(ObjCPropertyRefExpr, {
+ addData(S->isSuperReceiver());
+ addData(S->isImplicitProperty());
+ })
+ DEF_ADD_DATA(ObjCAtCatchStmt, { addData(S->hasEllipsis()); })
+
+ //--- Miscellaneous Stmts ------------------------------------------------//
+ DEF_ADD_DATA(CXXFoldExpr, {
+ addData(S->isRightFold());
+ addData(S->getOperator());
+ })
+ DEF_ADD_DATA(GenericSelectionExpr, {
+ for (unsigned i = 0; i < S->getNumAssocs(); ++i) {
+ addData(S->getAssocType(i));
+ }
+ })
+ DEF_ADD_DATA(LambdaExpr, {
+ for (const LambdaCapture &C : S->captures()) {
+ addData(C.isPackExpansion());
+ addData(C.getCaptureKind());
+ if (C.capturesVariable())
+ addData(C.getCapturedVar()->getType());
+ }
+ addData(S->isGenericLambda());
+ addData(S->isMutable());
+ })
+ DEF_ADD_DATA(DeclStmt, {
+ auto numDecls = std::distance(S->decl_begin(), S->decl_end());
+ addData(static_cast<CloneDetector::DataPiece>(numDecls));
+ for (const Decl *D : S->decls()) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ addData(VD->getType());
+ }
+ }
+ })
+ DEF_ADD_DATA(AsmStmt, {
+ addData(S->isSimple());
+ addData(S->isVolatile());
+ addData(S->generateAsmString(Context));
+ for (unsigned i = 0; i < S->getNumInputs(); ++i) {
+ addData(S->getInputConstraint(i));
+ }
+ for (unsigned i = 0; i < S->getNumOutputs(); ++i) {
+ addData(S->getOutputConstraint(i));
+ }
+ for (unsigned i = 0; i < S->getNumClobbers(); ++i) {
+ addData(S->getClobber(i));
+ }
+ })
+ DEF_ADD_DATA(AttributedStmt, {
+ for (const Attr *A : S->getAttrs()) {
+ addData(std::string(A->getSpelling()));
+ }
+ })
+};
+} // end anonymous namespace
+
+namespace {
+/// Generates CloneSignatures for a set of statements and stores the results in
+/// a CloneDetector object.
+class CloneSignatureGenerator {
+
+ CloneDetector &CD;
+ ASTContext &Context;
+
+ /// \brief Generates CloneSignatures for all statements in the given statement
+ /// tree and stores them in the CloneDetector.
+ ///
+ /// \param S The root of the given statement tree.
+ /// \param ParentMacroStack A string representing the macros that generated
+ /// the parent statement or an empty string if no
+ /// macros generated the parent statement.
+ /// See getMacroStack() for generating such a string.
+ /// \return The CloneSignature of the root statement.
+ CloneDetector::CloneSignature
+ generateSignatures(const Stmt *S, const std::string &ParentMacroStack) {
+ // Create an empty signature that will be filled in this method.
+ CloneDetector::CloneSignature Signature;
+
+ llvm::MD5 Hash;
+
+ // Collect all relevant data from S and hash it.
+ StmtDataCollector<llvm::MD5>(S, Context, Hash);
+
+ // Look up what macros expanded into the current statement.
+ std::string StartMacroStack = getMacroStack(S->getLocStart(), Context);
+ std::string EndMacroStack = getMacroStack(S->getLocEnd(), Context);
+
+    // First, check if ParentMacroStack is not empty, which means we are
+    // currently dealing with a parent statement that was expanded from a macro.
+    // If this parent statement was expanded from the same macros as this
+    // statement, we reduce the initial complexity of this statement to zero.
+    // This way a group of statements that were generated by a single macro
+    // expansion only increases the total complexity by one.
+ // Note: This is not the final complexity of this statement as we still
+ // add the complexity of the child statements to the complexity value.
+ if (!ParentMacroStack.empty() && (StartMacroStack == ParentMacroStack &&
+ EndMacroStack == ParentMacroStack)) {
+ Signature.Complexity = 0;
+ }
+
+ // Storage for the signatures of the direct child statements. This is only
+ // needed if the current statement is a CompoundStmt.
+ std::vector<CloneDetector::CloneSignature> ChildSignatures;
+ const CompoundStmt *CS = dyn_cast<const CompoundStmt>(S);
+
+ // The signature of a statement includes the signatures of its children.
+ // Therefore we create the signatures for every child and add them to the
+ // current signature.
+ for (const Stmt *Child : S->children()) {
+ // Some statements like 'if' can have nullptr children that we will skip.
+ if (!Child)
+ continue;
+
+ // Recursive call to create the signature of the child statement. This
+ // will also create and store all clone groups in this child statement.
+ // We pass only the StartMacroStack along to keep things simple.
+ auto ChildSignature = generateSignatures(Child, StartMacroStack);
+
+ // Add the collected data to the signature of the current statement.
+ Signature.Complexity += ChildSignature.Complexity;
+ Hash.update(StringRef(reinterpret_cast<char *>(&ChildSignature.Hash),
+ sizeof(ChildSignature.Hash)));
+
+      // If the current statement is a CompoundStmt, we need to store the
+ // signature for the generation of the sub-sequences.
+ if (CS)
+ ChildSignatures.push_back(ChildSignature);
+ }
+
+ // If the current statement is a CompoundStmt, we also need to create the
+ // clone groups from the sub-sequences inside the children.
+ if (CS)
+ handleSubSequences(CS, ChildSignatures);
+
+ // Create the final hash code for the current signature.
+ llvm::MD5::MD5Result HashResult;
+ Hash.final(HashResult);
+
+    // Copy as much of the generated hash code as fits into the signature's hash.
+ std::memcpy(&Signature.Hash, &HashResult,
+ std::min(sizeof(Signature.Hash), sizeof(HashResult)));
+
+ // Save the signature for the current statement in the CloneDetector object.
+ CD.add(StmtSequence(S, Context), Signature);
+
+ return Signature;
+ }
+
+ /// \brief Adds all possible sub-sequences in the child array of the given
+ /// CompoundStmt to the CloneDetector.
+ /// \param CS The given CompoundStmt.
+ /// \param ChildSignatures A list of calculated signatures for each child in
+ /// the given CompoundStmt.
+ void handleSubSequences(
+ const CompoundStmt *CS,
+ const std::vector<CloneDetector::CloneSignature> &ChildSignatures) {
+
+ // FIXME: This function has quadratic runtime right now. Check if skipping
+ // this function for too long CompoundStmts is an option.
+
+    // The length of the sub-sequence. We don't need to handle sequences of
+    // length 1 as they are already handled in generateSignatures().
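+    // For example, a CompoundStmt body { a; b; c; } produces the
+    // sub-sequences [a b], [b c] and [a b c].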
+ for (unsigned Length = 2; Length <= CS->size(); ++Length) {
+ // The start index in the body of the CompoundStmt. We increase the
+ // position until the end of the sub-sequence reaches the end of the
+ // CompoundStmt body.
+ for (unsigned Pos = 0; Pos <= CS->size() - Length; ++Pos) {
+ // Create an empty signature and add the signatures of all selected
+ // child statements to it.
+ CloneDetector::CloneSignature SubSignature;
+ llvm::MD5 SubHash;
+
+ for (unsigned i = Pos; i < Pos + Length; ++i) {
+ SubSignature.Complexity += ChildSignatures[i].Complexity;
+ size_t ChildHash = ChildSignatures[i].Hash;
+
+ SubHash.update(StringRef(reinterpret_cast<char *>(&ChildHash),
+ sizeof(ChildHash)));
+ }
+
+ // Create the final hash code for the current signature.
+ llvm::MD5::MD5Result HashResult;
+ SubHash.final(HashResult);
+
+        // Copy as much of the generated hash code as fits into the signature's
+        // hash.
+ std::memcpy(&SubSignature.Hash, &HashResult,
+ std::min(sizeof(SubSignature.Hash), sizeof(HashResult)));
+
+        // Save the signature together with the information about which child
+        // sequence we selected.
+ CD.add(StmtSequence(CS, Context, Pos, Pos + Length), SubSignature);
+ }
+ }
+ }
+
+public:
+ explicit CloneSignatureGenerator(CloneDetector &CD, ASTContext &Context)
+ : CD(CD), Context(Context) {}
+
+ /// \brief Generates signatures for all statements in the given function body.
+ void consumeCodeBody(const Stmt *S) { generateSignatures(S, ""); }
+};
+} // end anonymous namespace
+
+void CloneDetector::analyzeCodeBody(const Decl *D) {
+ assert(D);
+ assert(D->hasBody());
+ CloneSignatureGenerator Generator(*this, D->getASTContext());
+ Generator.consumeCodeBody(D->getBody());
+}
+
+void CloneDetector::add(const StmtSequence &S,
+ const CloneSignature &Signature) {
+ Sequences.push_back(std::make_pair(Signature, S));
+}
+
+namespace {
+/// \brief Returns true if and only if \p Stmt contains at least one other
+/// sequence in the \p Group.
+bool containsAnyInGroup(StmtSequence &Stmt, CloneDetector::CloneGroup &Group) {
+ for (StmtSequence &GroupStmt : Group.Sequences) {
+ if (Stmt.contains(GroupStmt))
+ return true;
+ }
+ return false;
+}
+
+/// \brief Returns true if and only if all sequences in \p OtherGroup are
+/// contained by a sequence in \p Group.
+bool containsGroup(CloneDetector::CloneGroup &Group,
+ CloneDetector::CloneGroup &OtherGroup) {
+  // We have fewer sequences in the current group than we have in the other,
+ // so we will never fulfill the requirement for returning true. This is only
+ // possible because we know that a sequence in Group can contain at most
+ // one sequence in OtherGroup.
+ if (Group.Sequences.size() < OtherGroup.Sequences.size())
+ return false;
+
+ for (StmtSequence &Stmt : Group.Sequences) {
+ if (!containsAnyInGroup(Stmt, OtherGroup))
+ return false;
+ }
+ return true;
+}
+} // end anonymous namespace
+
+namespace {
+/// \brief Wrapper around FoldingSetNodeID so that it can be used as the
+/// template argument of the StmtDataCollector.
+class FoldingSetNodeIDWrapper {
+
+ llvm::FoldingSetNodeID &FS;
+
+public:
+ FoldingSetNodeIDWrapper(llvm::FoldingSetNodeID &FS) : FS(FS) {}
+
+ void update(StringRef Str) { FS.AddString(Str); }
+};
+} // end anonymous namespace
+
+/// \brief Writes the relevant data from all statements and child statements
+/// in the given StmtSequence into the given FoldingSetNodeID.
+static void CollectStmtSequenceData(const StmtSequence &Sequence,
+ FoldingSetNodeIDWrapper &OutputData) {
+ for (const Stmt *S : Sequence) {
+ StmtDataCollector<FoldingSetNodeIDWrapper>(S, Sequence.getASTContext(),
+ OutputData);
+
+ for (const Stmt *Child : S->children()) {
+ if (!Child)
+ continue;
+
+ CollectStmtSequenceData(StmtSequence(Child, Sequence.getASTContext()),
+ OutputData);
+ }
+ }
+}
+
+/// \brief Returns true if both sequences are clones of each other.
+static bool areSequencesClones(const StmtSequence &LHS,
+ const StmtSequence &RHS) {
+ // We collect the data from all statements in the sequence as we did before
+ // when generating a hash value for each sequence. But this time we don't
+  // hash the collected data; instead we compare the whole data set. This
+ // prevents any false-positives due to hash code collisions.
+ llvm::FoldingSetNodeID DataLHS, DataRHS;
+ FoldingSetNodeIDWrapper LHSWrapper(DataLHS);
+ FoldingSetNodeIDWrapper RHSWrapper(DataRHS);
+
+ CollectStmtSequenceData(LHS, LHSWrapper);
+ CollectStmtSequenceData(RHS, RHSWrapper);
+
+ return DataLHS == DataRHS;
+}
+
+/// \brief Finds all actual clone groups in a single group of presumed clones.
+/// \param Result Output parameter to which all found groups are added.
+/// \param Group A group of presumed clones. The clones are allowed to have a
+/// different variable pattern and may not be actual clones of each
+/// other.
+/// \param CheckVariablePattern If true, every clone in a group that was added
+/// to the output follows the same variable pattern as the other
+/// clones in its group.
+static void createCloneGroups(std::vector<CloneDetector::CloneGroup> &Result,
+ const CloneDetector::CloneGroup &Group,
+ bool CheckVariablePattern) {
+ // We remove the Sequences one by one, so a list is more appropriate.
+ std::list<StmtSequence> UnassignedSequences(Group.Sequences.begin(),
+ Group.Sequences.end());
+
+ // Search for clones as long as there could be clones in UnassignedSequences.
+ while (UnassignedSequences.size() > 1) {
+
+    // Pick the first Sequence as a prototype for a new clone group.
+ StmtSequence Prototype = UnassignedSequences.front();
+ UnassignedSequences.pop_front();
+
+ CloneDetector::CloneGroup FilteredGroup(Prototype, Group.Signature);
+
+ // Analyze the variable pattern of the prototype. Every other StmtSequence
+ // needs to have the same pattern to get into the new clone group.
+ VariablePattern PrototypeFeatures(Prototype);
+
+ // Search all remaining StmtSequences for an identical variable pattern
+ // and assign them to our new clone group.
+ auto I = UnassignedSequences.begin(), E = UnassignedSequences.end();
+ while (I != E) {
+      // If the sequence doesn't fit the prototype, we have encountered
+ // an unintended hash code collision and we skip it.
+ if (!areSequencesClones(Prototype, *I)) {
+ ++I;
+ continue;
+ }
+
+ // If we weren't asked to check for a matching variable pattern in clone
+ // groups we can add the sequence now to the new clone group.
+ // If we were asked to check for matching variable pattern, we first have
+ // to check that there are no differences between the two patterns and
+ // only proceed if they match.
+ if (!CheckVariablePattern ||
+ VariablePattern(*I).countPatternDifferences(PrototypeFeatures) == 0) {
+ FilteredGroup.Sequences.push_back(*I);
+ I = UnassignedSequences.erase(I);
+ continue;
+ }
+
+      // We didn't find a matching variable pattern, so we continue with the
+ // next sequence.
+ ++I;
+ }
+
+ // Add a valid clone group to the list of found clone groups.
+ if (!FilteredGroup.isValid())
+ continue;
+
+ Result.push_back(FilteredGroup);
+ }
+}
+
+void CloneDetector::findClones(std::vector<CloneGroup> &Result,
+ unsigned MinGroupComplexity,
+ bool CheckPatterns) {
+ // A shortcut (and necessary for the for-loop later in this function).
+ if (Sequences.empty())
+ return;
+
+ // We need to search for groups of StmtSequences with the same hash code to
+ // create our initial clone groups. By sorting all known StmtSequences by
+ // their hash value we make sure that StmtSequences with the same hash code
+ // are grouped together in the Sequences vector.
+ // Note: We stable sort here because the StmtSequences are added in the order
+ // in which they appear in the source file. We want to preserve that order
+ // because we also want to report them in that order in the CloneChecker.
+ std::stable_sort(Sequences.begin(), Sequences.end(),
+ [](std::pair<CloneSignature, StmtSequence> LHS,
+ std::pair<CloneSignature, StmtSequence> RHS) {
+ return LHS.first.Hash < RHS.first.Hash;
+ });
+
+ std::vector<CloneGroup> CloneGroups;
+
+ // Check for each CloneSignature if its successor has the same hash value.
+ // We don't check the last CloneSignature as it has no successor.
+ // Note: The 'size - 1' in the condition is safe because we check for an empty
+ // Sequences vector at the beginning of this function.
+ for (unsigned i = 0; i < Sequences.size() - 1; ++i) {
+ const auto Current = Sequences[i];
+ const auto Next = Sequences[i + 1];
+
+ if (Current.first.Hash != Next.first.Hash)
+ continue;
+
+    // It's likely that we just found a sequence of CloneSignatures that
+ // represent a CloneGroup, so we create a new group and start checking and
+ // adding the CloneSignatures in this sequence.
+ CloneGroup Group;
+ Group.Signature = Current.first;
+
+ for (; i < Sequences.size(); ++i) {
+ const auto &Signature = Sequences[i];
+
+ // A different hash value means we have reached the end of the sequence.
+ if (Current.first.Hash != Signature.first.Hash) {
+ // The current Signature could be the start of a new CloneGroup. So we
+ // decrement i so that we visit it again in the outer loop.
+ // Note: i can never be 0 at this point because we are just comparing
+ // the hash of the Current CloneSignature with itself in the 'if' above.
+ assert(i != 0);
+ --i;
+ break;
+ }
+
+ // Skip CloneSignatures that won't pass the complexity requirement.
+ if (Signature.first.Complexity < MinGroupComplexity)
+ continue;
+
+ Group.Sequences.push_back(Signature.second);
+ }
+
+    // There is a chance that we found fewer than two fitting CloneSignatures
+    // because not enough CloneSignatures passed the complexity requirement.
+    // As a CloneGroup with fewer than two members makes no sense, we ignore
+    // this CloneGroup and won't add it to the result.
+ if (!Group.isValid())
+ continue;
+
+ CloneGroups.push_back(Group);
+ }
+
+ // Add every valid clone group that fulfills the complexity requirement.
+ for (const CloneGroup &Group : CloneGroups) {
+ createCloneGroups(Result, Group, CheckPatterns);
+ }
+
+ std::vector<unsigned> IndexesToRemove;
+
+  // Compare every group in the result with the rest. If one group contains
+ // another group, we only need to return the bigger group.
+ // Note: This doesn't scale well, so if possible avoid calling any heavy
+ // function from this loop to minimize the performance impact.
+ for (unsigned i = 0; i < Result.size(); ++i) {
+ for (unsigned j = 0; j < Result.size(); ++j) {
+ // Don't compare a group with itself.
+ if (i == j)
+ continue;
+
+ if (containsGroup(Result[j], Result[i])) {
+ IndexesToRemove.push_back(i);
+ break;
+ }
+ }
+ }
+
+ // Erasing a list of indexes from the vector should be done with decreasing
+ // indexes. As IndexesToRemove is constructed with increasing values, we just
+ // reverse iterate over it to get the desired order.
+ for (auto I = IndexesToRemove.rbegin(); I != IndexesToRemove.rend(); ++I) {
+ Result.erase(Result.begin() + *I);
+ }
+}
+
+void CloneDetector::findSuspiciousClones(
+ std::vector<CloneDetector::SuspiciousClonePair> &Result,
+ unsigned MinGroupComplexity) {
+ std::vector<CloneGroup> Clones;
+ // Reuse the normal search for clones but specify that the clone groups don't
+ // need to have a common referenced variable pattern so that we can manually
+ // search for the kind of pattern errors this function is supposed to find.
+ findClones(Clones, MinGroupComplexity, false);
+
+ for (const CloneGroup &Group : Clones) {
+ for (unsigned i = 0; i < Group.Sequences.size(); ++i) {
+ VariablePattern PatternA(Group.Sequences[i]);
+
+ for (unsigned j = i + 1; j < Group.Sequences.size(); ++j) {
+ VariablePattern PatternB(Group.Sequences[j]);
+
+ CloneDetector::SuspiciousClonePair ClonePair;
+ // For now, we only report clones which break the variable pattern just
+ // once because multiple differences in a pattern are an indicator that
+ // those differences are maybe intended (e.g. because it's actually
+ // a different algorithm).
+ // TODO: In very big clones even multiple variables can be unintended,
+ // so replacing this number with a percentage could better handle such
+ // cases. On the other hand it could increase the false-positive rate
+ // for all clones if the percentage is too high.
+ if (PatternA.countPatternDifferences(PatternB, &ClonePair) == 1) {
+ Result.push_back(ClonePair);
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/lib/Analysis/Consumed.cpp b/lib/Analysis/Consumed.cpp
index 47bef1b927c9..f6fe78ac4619 100644
--- a/lib/Analysis/Consumed.cpp
+++ b/lib/Analysis/Consumed.cpp
@@ -12,6 +12,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
@@ -20,16 +21,12 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
-#include "clang/Analysis/Analyses/Consumed.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/raw_ostream.h"
#include <memory>
// TODO: Adjust states of args to constructors in the same way that arguments to
diff --git a/lib/Analysis/FormatString.cpp b/lib/Analysis/FormatString.cpp
index 83d08b55427f..c62e537e92dd 100644
--- a/lib/Analysis/FormatString.cpp
+++ b/lib/Analysis/FormatString.cpp
@@ -266,14 +266,15 @@ bool clang::analyze_format_string::ParseUTF8InvalidSpecifier(
if (SpecifierBegin + 1 >= FmtStrEnd)
return false;
- const UTF8 *SB = reinterpret_cast<const UTF8 *>(SpecifierBegin + 1);
- const UTF8 *SE = reinterpret_cast<const UTF8 *>(FmtStrEnd);
+ const llvm::UTF8 *SB =
+ reinterpret_cast<const llvm::UTF8 *>(SpecifierBegin + 1);
+ const llvm::UTF8 *SE = reinterpret_cast<const llvm::UTF8 *>(FmtStrEnd);
const char FirstByte = *SB;
// If the invalid specifier is a multibyte UTF-8 string, return the
// total length accordingly so that the conversion specifier can be
// properly updated to reflect a complete UTF-8 specifier.
- unsigned NumBytes = getNumBytesForUTF8(FirstByte);
+ unsigned NumBytes = llvm::getNumBytesForUTF8(FirstByte);
if (NumBytes == 1)
return false;
if (SB + NumBytes > SE)
@@ -310,8 +311,13 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
return Match;
case AnyCharTy: {
- if (const EnumType *ETy = argTy->getAs<EnumType>())
+ if (const EnumType *ETy = argTy->getAs<EnumType>()) {
+ // If the enum is incomplete we know nothing about the underlying type.
+ // Assume that it's 'int'.
+ if (!ETy->getDecl()->isComplete())
+ return NoMatch;
argTy = ETy->getDecl()->getIntegerType();
+ }
if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
switch (BT->getKind()) {
@@ -327,8 +333,14 @@ ArgType::matchesType(ASTContext &C, QualType argTy) const {
}
case SpecificTy: {
- if (const EnumType *ETy = argTy->getAs<EnumType>())
- argTy = ETy->getDecl()->getIntegerType();
+ if (const EnumType *ETy = argTy->getAs<EnumType>()) {
+ // If the enum is incomplete we know nothing about the underlying type.
+ // Assume that it's 'int'.
+ if (!ETy->getDecl()->isComplete())
+ argTy = C.IntTy;
+ else
+ argTy = ETy->getDecl()->getIntegerType();
+ }
argTy = C.getCanonicalType(argTy).getUnqualifiedType();
if (T == argTy)
@@ -579,6 +591,8 @@ const char *ConversionSpecifier::toString() const {
case cArg: return "c";
case sArg: return "s";
case pArg: return "p";
+ case PArg:
+ return "P";
case nArg: return "n";
case PercentArg: return "%";
case ScanListArg: return "[";
@@ -854,6 +868,7 @@ bool FormatSpecifier::hasStandardConversionSpecifier(
case ConversionSpecifier::ObjCObjArg:
case ConversionSpecifier::ScanListArg:
case ConversionSpecifier::PercentArg:
+ case ConversionSpecifier::PArg:
return true;
case ConversionSpecifier::CArg:
case ConversionSpecifier::SArg:
diff --git a/lib/Analysis/FormatStringParsing.h b/lib/Analysis/FormatStringParsing.h
index 8463fcec5bf4..17fd2f6aefb8 100644
--- a/lib/Analysis/FormatStringParsing.h
+++ b/lib/Analysis/FormatStringParsing.h
@@ -4,7 +4,6 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/FormatString.h"
-#include "llvm/Support/raw_ostream.h"
namespace clang {
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 5e0a9a0d73c8..cd73a62e6918 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -19,6 +19,7 @@
#include "clang/Analysis/CFG.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <vector>
@@ -28,20 +29,21 @@ using namespace clang;
namespace {
class DataflowWorklist {
- SmallVector<const CFGBlock *, 20> worklist;
llvm::BitVector enqueuedBlocks;
PostOrderCFGView *POV;
+ llvm::PriorityQueue<const CFGBlock *, SmallVector<const CFGBlock *, 20>,
+ PostOrderCFGView::BlockOrderCompare> worklist;
+
public:
DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
: enqueuedBlocks(cfg.getNumBlockIDs()),
- POV(Ctx.getAnalysis<PostOrderCFGView>()) {}
+ POV(Ctx.getAnalysis<PostOrderCFGView>()),
+ worklist(POV->getComparator()) {}
void enqueueBlock(const CFGBlock *block);
void enqueuePredecessors(const CFGBlock *block);
const CFGBlock *dequeue();
-
- void sortWorklist();
};
}
@@ -49,31 +51,22 @@ public:
void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) {
if (block && !enqueuedBlocks[block->getBlockID()]) {
enqueuedBlocks[block->getBlockID()] = true;
- worklist.push_back(block);
+ worklist.push(block);
}
}
void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
- const unsigned OldWorklistSize = worklist.size();
for (CFGBlock::const_pred_iterator I = block->pred_begin(),
E = block->pred_end(); I != E; ++I) {
enqueueBlock(*I);
}
-
- if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
- return;
-
- sortWorklist();
-}
-
-void DataflowWorklist::sortWorklist() {
- std::sort(worklist.begin(), worklist.end(), POV->getComparator());
}
const CFGBlock *DataflowWorklist::dequeue() {
if (worklist.empty())
return nullptr;
- const CFGBlock *b = worklist.pop_back_val();
+ const CFGBlock *b = worklist.top();
+ worklist.pop();
enqueuedBlocks[b->getBlockID()] = false;
return b;
}
@@ -528,8 +521,6 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
}
}
- worklist.sortWorklist();
-
while (const CFGBlock *block = worklist.dequeue()) {
// Determine if the block's end value has changed. If not, we
// have nothing left to do for this block.
diff --git a/lib/Analysis/OSLog.cpp b/lib/Analysis/OSLog.cpp
new file mode 100644
index 000000000000..3e13a153c65f
--- /dev/null
+++ b/lib/Analysis/OSLog.cpp
@@ -0,0 +1,202 @@
+// TODO: header template
+
+#include "clang/Analysis/Analyses/OSLog.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/ADT/SmallBitVector.h"
+
+using namespace clang;
+using llvm::APInt;
+
+using clang::analyze_os_log::OSLogBufferItem;
+using clang::analyze_os_log::OSLogBufferLayout;
+
+class OSLogFormatStringHandler
+ : public analyze_format_string::FormatStringHandler {
+private:
+ struct ArgData {
+ const Expr *E = nullptr;
+ Optional<OSLogBufferItem::Kind> Kind;
+ Optional<unsigned> Size;
+ Optional<const Expr *> Count;
+ Optional<const Expr *> Precision;
+ Optional<const Expr *> FieldWidth;
+ unsigned char Flags = 0;
+ };
+ SmallVector<ArgData, 4> ArgsData;
+ ArrayRef<const Expr *> Args;
+
+ OSLogBufferItem::Kind
+ getKind(analyze_format_string::ConversionSpecifier::Kind K) {
+ switch (K) {
+ case clang::analyze_format_string::ConversionSpecifier::sArg: // "%s"
+ return OSLogBufferItem::StringKind;
+ case clang::analyze_format_string::ConversionSpecifier::SArg: // "%S"
+ return OSLogBufferItem::WideStringKind;
+    case clang::analyze_format_string::ConversionSpecifier::PArg: // "%P"
+ return OSLogBufferItem::PointerKind;
+ case clang::analyze_format_string::ConversionSpecifier::ObjCObjArg: // "%@"
+ return OSLogBufferItem::ObjCObjKind;
+ case clang::analyze_format_string::ConversionSpecifier::PrintErrno: // "%m"
+ return OSLogBufferItem::ErrnoKind;
+ default:
+ return OSLogBufferItem::ScalarKind;
+    }
+  }
+
+public:
+ OSLogFormatStringHandler(ArrayRef<const Expr *> Args) : Args(Args) {
+ ArgsData.reserve(Args.size());
+ }
+
+ virtual bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *StartSpecifier,
+ unsigned SpecifierLen) {
+ if (!FS.consumesDataArgument() &&
+ FS.getConversionSpecifier().getKind() !=
+ clang::analyze_format_string::ConversionSpecifier::PrintErrno)
+ return true;
+
+ ArgsData.emplace_back();
+ unsigned ArgIndex = FS.getArgIndex();
+ if (ArgIndex < Args.size())
+ ArgsData.back().E = Args[ArgIndex];
+
+ // First get the Kind
+ ArgsData.back().Kind = getKind(FS.getConversionSpecifier().getKind());
+ if (ArgsData.back().Kind != OSLogBufferItem::ErrnoKind &&
+ !ArgsData.back().E) {
+ // missing argument
+ ArgsData.pop_back();
+ return false;
+ }
+
+ switch (FS.getConversionSpecifier().getKind()) {
+ case clang::analyze_format_string::ConversionSpecifier::sArg: // "%s"
+ case clang::analyze_format_string::ConversionSpecifier::SArg: { // "%S"
+ auto &precision = FS.getPrecision();
+ switch (precision.getHowSpecified()) {
+ case clang::analyze_format_string::OptionalAmount::NotSpecified: // "%s"
+ break;
+ case clang::analyze_format_string::OptionalAmount::Constant: // "%.16s"
+ ArgsData.back().Size = precision.getConstantAmount();
+ break;
+ case clang::analyze_format_string::OptionalAmount::Arg: // "%.*s"
+ ArgsData.back().Count = Args[precision.getArgIndex()];
+ break;
+ case clang::analyze_format_string::OptionalAmount::Invalid:
+ return false;
+ }
+ break;
+ }
+ case clang::analyze_format_string::ConversionSpecifier::PArg: { // "%P"
+ auto &precision = FS.getPrecision();
+ switch (precision.getHowSpecified()) {
+ case clang::analyze_format_string::OptionalAmount::NotSpecified: // "%P"
+ return false; // length must be supplied with pointer format specifier
+ case clang::analyze_format_string::OptionalAmount::Constant: // "%.16P"
+ ArgsData.back().Size = precision.getConstantAmount();
+ break;
+ case clang::analyze_format_string::OptionalAmount::Arg: // "%.*P"
+ ArgsData.back().Count = Args[precision.getArgIndex()];
+ break;
+ case clang::analyze_format_string::OptionalAmount::Invalid:
+ return false;
+ }
+ break;
+ }
+ default:
+ if (FS.getPrecision().hasDataArgument()) {
+ ArgsData.back().Precision = Args[FS.getPrecision().getArgIndex()];
+ }
+ break;
+ }
+ if (FS.getFieldWidth().hasDataArgument()) {
+ ArgsData.back().FieldWidth = Args[FS.getFieldWidth().getArgIndex()];
+ }
+
+ if (FS.isPrivate()) {
+ ArgsData.back().Flags |= OSLogBufferItem::IsPrivate;
+ }
+ if (FS.isPublic()) {
+ ArgsData.back().Flags |= OSLogBufferItem::IsPublic;
+ }
+ return true;
+ }
+
+ void computeLayout(ASTContext &Ctx, OSLogBufferLayout &Layout) const {
+ Layout.Items.clear();
+ for (auto &Data : ArgsData) {
+ if (Data.FieldWidth) {
+ CharUnits Size = Ctx.getTypeSizeInChars((*Data.FieldWidth)->getType());
+ Layout.Items.emplace_back(OSLogBufferItem::ScalarKind, *Data.FieldWidth,
+ Size, 0);
+ }
+ if (Data.Precision) {
+ CharUnits Size = Ctx.getTypeSizeInChars((*Data.Precision)->getType());
+ Layout.Items.emplace_back(OSLogBufferItem::ScalarKind, *Data.Precision,
+ Size, 0);
+ }
+ if (Data.Count) {
+ // "%.*P" has an extra "count" that we insert before the argument.
+ CharUnits Size = Ctx.getTypeSizeInChars((*Data.Count)->getType());
+ Layout.Items.emplace_back(OSLogBufferItem::CountKind, *Data.Count, Size,
+ 0);
+ }
+ if (Data.Size)
+ Layout.Items.emplace_back(Ctx, CharUnits::fromQuantity(*Data.Size),
+ Data.Flags);
+ if (Data.Kind) {
+ CharUnits Size;
+ if (*Data.Kind == OSLogBufferItem::ErrnoKind)
+ Size = CharUnits::Zero();
+ else
+ Size = Ctx.getTypeSizeInChars(Data.E->getType());
+ Layout.Items.emplace_back(*Data.Kind, Data.E, Size, Data.Flags);
+ } else {
+ auto Size = Ctx.getTypeSizeInChars(Data.E->getType());
+ Layout.Items.emplace_back(OSLogBufferItem::ScalarKind, Data.E, Size,
+ Data.Flags);
+ }
+ }
+ }
+};
+
+bool clang::analyze_os_log::computeOSLogBufferLayout(
+ ASTContext &Ctx, const CallExpr *E, OSLogBufferLayout &Layout) {
+ ArrayRef<const Expr *> Args(E->getArgs(), E->getArgs() + E->getNumArgs());
+
+ const Expr *StringArg;
+ ArrayRef<const Expr *> VarArgs;
+ switch (E->getBuiltinCallee()) {
+ case Builtin::BI__builtin_os_log_format_buffer_size:
+ assert(E->getNumArgs() >= 1 &&
+ "__builtin_os_log_format_buffer_size takes at least 1 argument");
+ StringArg = E->getArg(0);
+ VarArgs = Args.slice(1);
+ break;
+ case Builtin::BI__builtin_os_log_format:
+ assert(E->getNumArgs() >= 2 &&
+ "__builtin_os_log_format takes at least 2 arguments");
+ StringArg = E->getArg(1);
+ VarArgs = Args.slice(2);
+ break;
+ default:
+ llvm_unreachable("non-os_log builtin passed to computeOSLogBufferLayout");
+ }
+
+ const StringLiteral *Lit = cast<StringLiteral>(StringArg->IgnoreParenCasts());
+ assert(Lit && (Lit->isAscii() || Lit->isUTF8()));
+ StringRef Data = Lit->getString();
+ OSLogFormatStringHandler H(VarArgs);
+ ParsePrintfString(H, Data.begin(), Data.end(), Ctx.getLangOpts(),
+ Ctx.getTargetInfo(), /*isFreeBSDKPrintf*/ false);
+
+ H.computeLayout(Ctx, Layout);
+ return true;
+}
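For orientation, a minimal sketch of a caller consuming this new API, using
only what the file itself declares (computeOSLogBufferLayout and the public
Items vector); the helper name is hypothetical:

    static void dumpOSLogLayout(ASTContext &Ctx, const CallExpr *CE) {
      using namespace clang::analyze_os_log;
      OSLogBufferLayout Layout;
      // CE must call __builtin_os_log_format or
      // __builtin_os_log_format_buffer_size; anything else is unreachable.
      if (computeOSLogBufferLayout(Ctx, CE, Layout))
        llvm::errs() << "os_log buffer items: " << Layout.Items.size() << "\n";
    }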
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index ac6cef9d0842..ed7193ecb437 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -119,6 +119,39 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
return true;
}
+ const char *OSLogVisibilityFlagsStart = nullptr,
+ *OSLogVisibilityFlagsEnd = nullptr;
+ if (*I == '{') {
+ OSLogVisibilityFlagsStart = I++;
+ // Find the end of the modifier.
+ while (I != E && *I != '}') {
+ I++;
+ }
+ if (I == E) {
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ assert(*I == '}');
+ OSLogVisibilityFlagsEnd = I++;
+
+ // Just see if 'private' or 'public' is the first word. os_log itself will
+ // do any further parsing.
+ const char *P = OSLogVisibilityFlagsStart + 1;
+ while (P < OSLogVisibilityFlagsEnd && isspace(*P))
+ P++;
+ const char *WordStart = P;
+ while (P < OSLogVisibilityFlagsEnd && (isalnum(*P) || *P == '_'))
+ P++;
+ const char *WordEnd = P;
+ StringRef Word(WordStart, WordEnd - WordStart);
+ if (Word == "private") {
+ FS.setIsPrivate(WordStart);
+ } else if (Word == "public") {
+ FS.setIsPublic(WordStart);
+ }
+ }
+
// Look for flags (if any).
bool hasMore = true;
for ( ; I != E; ++I) {
@@ -253,6 +286,10 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
// POSIX specific.
case 'C': k = ConversionSpecifier::CArg; break;
case 'S': k = ConversionSpecifier::SArg; break;
+ // Apple extension for os_log
+ case 'P':
+ k = ConversionSpecifier::PArg;
+ break;
// Objective-C.
case '@': k = ConversionSpecifier::ObjCObjArg; break;
// Glibc specific.
@@ -301,7 +338,7 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
conversionPosition);
return true;
}
-
+
PrintfConversionSpecifier CS(conversionPosition, k);
FS.setConversionSpecifier(CS);
if (CS.consumesDataArgument() && !FS.usesPositionalArg())
@@ -541,6 +578,7 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
return Ctx.IntTy;
return ArgType(Ctx.WideCharTy, "wchar_t");
case ConversionSpecifier::pArg:
+ case ConversionSpecifier::PArg:
return ArgType::CPointerTy;
case ConversionSpecifier::ObjCObjArg:
return ArgType::ObjCPointerTy;
@@ -900,7 +938,7 @@ bool PrintfSpecifier::hasValidPrecision() const {
if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
return true;
- // Precision is only valid with the diouxXaAeEfFgGs conversions
+ // Precision is only valid with the diouxXaAeEfFgGsP conversions
switch (CS.getKind()) {
case ConversionSpecifier::dArg:
case ConversionSpecifier::DArg:
@@ -922,6 +960,7 @@ bool PrintfSpecifier::hasValidPrecision() const {
case ConversionSpecifier::sArg:
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
+ case ConversionSpecifier::PArg:
return true;
default:
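Taken together with OSLog.cpp above, the printf parser now accepts the os_log
extensions below; each line shows the specifier kind and flags it produces:

    // "%{private}s"  -> sArg, with setIsPrivate() recorded on the specifier
    // "%{public}d"   -> dArg, with setIsPublic()
    // "%.16P"        -> PArg carrying a constant 16-byte length
    // "%.*P"         -> PArg whose length comes from a preceding int argument
    // "%P"           -> rejected downstream: %P requires an explicit length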
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index 8165b09f4080..69d000c03bac 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -164,6 +164,8 @@ static bool isConfigurationValue(const Stmt *S,
if (!S)
return false;
+ S = S->IgnoreImplicit();
+
if (const Expr *Ex = dyn_cast<Expr>(S))
S = Ex->IgnoreCasts();
diff --git a/lib/Analysis/ScanfFormatString.cpp b/lib/Analysis/ScanfFormatString.cpp
index 82b038864c23..3b93f1a57f1f 100644
--- a/lib/Analysis/ScanfFormatString.cpp
+++ b/lib/Analysis/ScanfFormatString.cpp
@@ -418,8 +418,12 @@ bool ScanfSpecifier::fixType(QualType QT, QualType RawQT,
QualType PT = QT->getPointeeType();
// If it's an enum, get its underlying type.
- if (const EnumType *ETy = PT->getAs<EnumType>())
+ if (const EnumType *ETy = PT->getAs<EnumType>()) {
+ // Don't try to fix incomplete enums.
+ if (!ETy->getDecl()->isComplete())
+ return false;
PT = ETy->getDecl()->getIntegerType();
+ }
const BuiltinType *BT = PT->getAs<BuiltinType>();
if (!BT)
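Why the completeness check matters, as a sketch: in C, "enum E;" declares an
incomplete enumeration, so a scanf fixit for "enum E *" has no underlying
integer type to consult (the reproducer below is illustrative):

    // enum E;                  E is incomplete: isComplete() is false and
    // extern enum E *p;        getIntegerType() would be a null QualType,
    // scanf("%d", p);          so fixType() now bails out instead.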
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index b282a5bbd8d8..879a15c9c2a8 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -15,13 +15,13 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
-#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
@@ -32,8 +32,6 @@
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallVector.h"
@@ -259,7 +257,7 @@ private:
struct BeforeInfo {
BeforeInfo() : Visited(0) {}
- BeforeInfo(BeforeInfo &&O) : Vect(std::move(O.Vect)), Visited(O.Visited) {}
+ BeforeInfo(BeforeInfo &&) = default;
BeforeVect Vect;
int Visited;
@@ -1585,7 +1583,7 @@ void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
/// a pointer marked with pt_guarded_by.
void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK) {
- Exp = Exp->IgnoreParenCasts();
+ Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
SourceLocation Loc = Exp->getExprLoc();
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index ffe95ea22a42..cbd5464c34d7 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -17,20 +17,14 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
-#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include <algorithm>
-#include <climits>
-#include <vector>
using namespace clang;
using namespace threadSafety;
@@ -239,6 +233,9 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
return translate(cast<ExprWithCleanups>(S)->getSubExpr(), Ctx);
case Stmt::CXXBindTemporaryExprClass:
return translate(cast<CXXBindTemporaryExpr>(S)->getSubExpr(), Ctx);
+ case Stmt::MaterializeTemporaryExprClass:
+ return translate(cast<MaterializeTemporaryExpr>(S)->GetTemporaryExpr(),
+ Ctx);
// Collect all literals
case Stmt::CharacterLiteralClass:
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index f2f791957aa3..d5289fb9d427 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -348,7 +348,8 @@ public:
}
static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
- if (VD->getType()->isRecordType()) return nullptr;
+ if (VD->getType()->isRecordType())
+ return nullptr;
if (Expr *Init = VD->getInit()) {
const DeclRefExpr *DRE
= dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index ad460d496533..8929ec30ff7b 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -28,7 +28,7 @@ find_first_existing_vc_file(clang_vc "${CLANG_SOURCE_DIR}")
# The VC revision include that we want to generate.
set(version_inc "${CMAKE_CURRENT_BINARY_DIR}/SVNVersion.inc")
-set(get_svn_script "${LLVM_MAIN_SRC_DIR}/cmake/modules/GetSVN.cmake")
+set(get_svn_script "${LLVM_CMAKE_PATH}/GetSVN.cmake")
if(DEFINED llvm_vc AND DEFINED clang_vc)
# Create custom target to generate the VC revision include.
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index f10d156743b2..7529c475d6b9 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -55,10 +55,12 @@ static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
Output.append(Str.begin(), Str.end());
}
-DiagnosticsEngine::DiagnosticsEngine(
- const IntrusiveRefCntPtr<DiagnosticIDs> &diags, DiagnosticOptions *DiagOpts,
- DiagnosticConsumer *client, bool ShouldOwnClient)
- : Diags(diags), DiagOpts(DiagOpts), Client(nullptr), SourceMgr(nullptr) {
+DiagnosticsEngine::DiagnosticsEngine(IntrusiveRefCntPtr<DiagnosticIDs> diags,
+ DiagnosticOptions *DiagOpts,
+ DiagnosticConsumer *client,
+ bool ShouldOwnClient)
+ : Diags(std::move(diags)), DiagOpts(DiagOpts), Client(nullptr),
+ SourceMgr(nullptr) {
setClient(client, ShouldOwnClient);
ArgToStringFn = DummyArgToStringFn;
ArgToStringCookie = nullptr;
@@ -740,7 +742,10 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
// "%diff{compare $ to $|other text}1,2"
// treat it as:
// "compare %1 to %2"
- const char *Pipe = ScanFormat(Argument, Argument + ArgumentLen, '|');
+ const char *ArgumentEnd = Argument + ArgumentLen;
+ const char *Pipe = ScanFormat(Argument, ArgumentEnd, '|');
+ assert(ScanFormat(Pipe + 1, ArgumentEnd, '|') == ArgumentEnd &&
+ "Found too many '|'s in a %diff modifier!");
const char *FirstDollar = ScanFormat(Argument, Pipe, '$');
const char *SecondDollar = ScanFormat(FirstDollar + 1, Pipe, '$');
const char ArgStr1[] = { '%', static_cast<char>('0' + ArgNo) };
@@ -1008,7 +1013,7 @@ PartialDiagnostic::StorageAllocator::StorageAllocator() {
PartialDiagnostic::StorageAllocator::~StorageAllocator() {
// Don't assert if we are in a CrashRecovery context, as this invariant may
// be invalidated during a crash.
- assert((NumFreeListEntries == NumCached ||
- llvm::CrashRecoveryContext::isRecoveringFromCrash()) &&
- "A partial is on the lamb");
+ assert((NumFreeListEntries == NumCached ||
+ llvm::CrashRecoveryContext::isRecoveringFromCrash()) &&
+ "A partial is on the lam");
}
diff --git a/lib/Basic/DiagnosticOptions.cpp b/lib/Basic/DiagnosticOptions.cpp
index f54a0ef4edb2..93c2196ca979 100644
--- a/lib/Basic/DiagnosticOptions.cpp
+++ b/lib/Basic/DiagnosticOptions.cpp
@@ -16,7 +16,7 @@
namespace clang {
-raw_ostream& operator<<(raw_ostream& Out, DiagnosticLevelMask M) {
+raw_ostream &operator<<(raw_ostream &Out, DiagnosticLevelMask M) {
using UT = std::underlying_type<DiagnosticLevelMask>::type;
return Out << static_cast<UT>(M);
}
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index ce9b7e1bb48c..50050d0a519b 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -26,10 +26,13 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
-#include <map>
-#include <set>
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cstdint>
+#include <cstdlib>
#include <string>
-#include <system_error>
+#include <utility>
using namespace clang;
@@ -137,7 +140,7 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
// Add the virtual directory to the cache.
auto UDE = llvm::make_unique<DirectoryEntry>();
- UDE->Name = NamedDirEnt.first().data();
+ UDE->Name = NamedDirEnt.first();
NamedDirEnt.second = UDE.get();
VirtualDirectoryEntries.push_back(std::move(UDE));
@@ -182,7 +185,7 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
// Get the null-terminated directory name as stored as the key of the
// SeenDirEntries map.
- const char *InterndDirName = NamedDirEnt.first().data();
+ StringRef InterndDirName = NamedDirEnt.first();
// Check to see if the directory exists.
FileData Data;
@@ -200,7 +203,7 @@ const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
DirectoryEntry &UDE = UniqueRealDirs[Data.UniqueID];
NamedDirEnt.second = &UDE;
- if (!UDE.getName()) {
+ if (UDE.getName().empty()) {
// We don't have this directory yet, add it. We use the string
// key from the SeenDirEntries map as the string.
UDE.Name = InterndDirName;
@@ -229,7 +232,7 @@ const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
// Get the null-terminated file name as stored as the key of the
// SeenFileEntries map.
- const char *InterndFileName = NamedFileEnt.first().data();
+ StringRef InterndFileName = NamedFileEnt.first();
// Look up the directory for the file. When looking up something like
// sys/foo.h we'll discover all of the search directories that have a 'sys'
@@ -420,7 +423,7 @@ FileManager::getBufferForFile(const FileEntry *Entry, bool isVolatile,
if (isVolatile)
FileSize = -1;
- const char *Filename = Entry->getName();
+ StringRef Filename = Entry->getName();
// If the file is already open, use the open file descriptor.
if (Entry->File) {
auto Result =
@@ -460,7 +463,7 @@ FileManager::getBufferForFile(StringRef Filename) {
/// if the path points to a virtual file or does not exist, or returns
/// false if it's an existent real file. If FileDescriptor is NULL,
/// do directory look-up instead of file look-up.
-bool FileManager::getStatValue(const char *Path, FileData &Data, bool isFile,
+bool FileManager::getStatValue(StringRef Path, FileData &Data, bool isFile,
std::unique_ptr<vfs::File> *F) {
// FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
// absolute!
@@ -497,7 +500,6 @@ void FileManager::invalidateCache(const FileEntry *Entry) {
UniqueRealFiles.erase(Entry->getUniqueID());
}
-
void FileManager::GetUniqueIDMapping(
SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
UIDToFiles.clear();
@@ -533,7 +535,7 @@ StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
#ifdef LLVM_ON_UNIX
char CanonicalNameBuf[PATH_MAX];
- if (realpath(Dir->getName(), CanonicalNameBuf))
+ if (realpath(Dir->getName().str().c_str(), CanonicalNameBuf))
CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
#else
SmallString<256> CanonicalNameBuf(CanonicalName);
diff --git a/lib/Basic/FileSystemStatCache.cpp b/lib/Basic/FileSystemStatCache.cpp
index 187ea37e0c28..799df1d3c3a6 100644
--- a/lib/Basic/FileSystemStatCache.cpp
+++ b/lib/Basic/FileSystemStatCache.cpp
@@ -23,7 +23,7 @@ static void copyStatusToFileData(const vfs::Status &Status,
FileData &Data) {
Data.Name = Status.getName();
Data.Size = Status.getSize();
- Data.ModTime = Status.getLastModificationTime().toEpochTime();
+ Data.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
Data.UniqueID = Status.getUniqueID();
Data.IsDirectory = Status.isDirectory();
Data.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
@@ -40,7 +40,7 @@ static void copyStatusToFileData(const vfs::Status &Status,
/// success for directories (not files). On a successful file lookup, the
/// implementation can optionally fill in FileDescriptor with a valid
/// descriptor and the client guarantees that it will close it.
-bool FileSystemStatCache::get(const char *Path, FileData &Data, bool isFile,
+bool FileSystemStatCache::get(StringRef Path, FileData &Data, bool isFile,
std::unique_ptr<vfs::File> *F,
FileSystemStatCache *Cache, vfs::FileSystem &FS) {
LookupResult R;
@@ -107,7 +107,7 @@ bool FileSystemStatCache::get(const char *Path, FileData &Data, bool isFile,
}
MemorizeStatCalls::LookupResult
-MemorizeStatCalls::getStat(const char *Path, FileData &Data, bool isFile,
+MemorizeStatCalls::getStat(StringRef Path, FileData &Data, bool isFile,
std::unique_ptr<vfs::File> *F, vfs::FileSystem &FS) {
LookupResult Result = statChained(Path, Data, isFile, F, FS);
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index d6ad0f5c9158..af424cd92390 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -113,7 +113,8 @@ namespace {
KEYOBJC2 = 0x20000,
KEYZVECTOR = 0x40000,
KEYCOROUTINES = 0x80000,
- KEYALL = (0xfffff & ~KEYNOMS18 &
+ KEYMODULES = 0x100000,
+ KEYALL = (0x1fffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
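A worked check of the mask arithmetic above (values copied from the enum; the
asserts themselves are illustrative, not part of the patch):

    static_assert(0x100000 == 1u << 20,
                  "KEYMODULES takes the next free flag bit");
    static_assert((0xfffffu | 0x100000u) == 0x1fffffu,
                  "KEYALL grows from covering bits 0..19 to bits 0..20");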
@@ -147,9 +148,10 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
// We treat bridge casts as objective-C keywords so we can warn on them
// in non-arc mode.
if (LangOpts.ObjC2 && (Flags & KEYARC)) return KS_Enabled;
- if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
if (LangOpts.ObjC2 && (Flags & KEYOBJC2)) return KS_Enabled;
- if (LangOpts.Coroutines && (Flags & KEYCOROUTINES)) return KS_Enabled;
+ if (LangOpts.ConceptsTS && (Flags & KEYCONCEPTS)) return KS_Enabled;
+ if (LangOpts.CoroutinesTS && (Flags & KEYCOROUTINES)) return KS_Enabled;
+ if (LangOpts.ModulesTS && (Flags & KEYMODULES)) return KS_Enabled;
if (LangOpts.CPlusPlus && (Flags & KEYCXX11)) return KS_Future;
return KS_Disabled;
}
@@ -441,9 +443,11 @@ std::string Selector::getAsString() const {
if (getIdentifierInfoFlag() < MultiArg) {
IdentifierInfo *II = getAsIdentifierInfo();
- // If the number of arguments is 0 then II is guaranteed to not be null.
- if (getNumArgs() == 0)
+ if (getNumArgs() == 0) {
+ assert(II && "If the number of arguments is 0 then II is guaranteed to "
+ "not be null.");
return II->getName();
+ }
if (!II)
return ":";
@@ -619,8 +623,8 @@ Selector SelectorTable::getSelector(unsigned nKeys, IdentifierInfo **IIV) {
// variable size array (for parameter types) at the end of them.
unsigned Size = sizeof(MultiKeywordSelector) + nKeys*sizeof(IdentifierInfo *);
MultiKeywordSelector *SI =
- (MultiKeywordSelector*)SelTabImpl.Allocator.Allocate(Size,
- llvm::alignOf<MultiKeywordSelector>());
+ (MultiKeywordSelector *)SelTabImpl.Allocator.Allocate(
+ Size, alignof(MultiKeywordSelector));
new (SI) MultiKeywordSelector(nKeys, IIV);
SelTabImpl.Table.InsertNode(SI, InsertPos);
return Selector(SI);
diff --git a/lib/Basic/LangOptions.cpp b/lib/Basic/LangOptions.cpp
index 8c0ecd46ad55..ff10a773a97c 100644
--- a/lib/Basic/LangOptions.cpp
+++ b/lib/Basic/LangOptions.cpp
@@ -15,7 +15,8 @@
using namespace clang;
-LangOptions::LangOptions() {
+LangOptions::LangOptions()
+ : IsHeaderFile(false) {
#define LANGOPT(Name, Bits, Default, Description) Name = Default;
#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) set##Name(Default);
#include "clang/Basic/LangOptions.def"
@@ -34,10 +35,10 @@ void LangOptions::resetNonModularOptions() {
SanitizerBlacklistFiles.clear();
CurrentModule.clear();
+ IsHeaderFile = false;
}
-bool LangOptions::isNoBuiltinFunc(const char *Name) const {
- StringRef FuncName(Name);
+bool LangOptions::isNoBuiltinFunc(StringRef FuncName) const {
for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
if (FuncName.equals(NoBuiltinFuncs[i]))
return true;
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index 3d1a40db5ea2..80bbc24f3db3 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -33,7 +33,7 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
IsExplicit(IsExplicit), IsSystem(false), IsExternC(false),
IsInferred(false), InferSubmodules(false), InferExplicitSubmodules(false),
InferExportWildcard(false), ConfigMacrosExhaustive(false),
- NameVisibility(Hidden) {
+ NoUndeclaredIncludes(false), NameVisibility(Hidden) {
if (Parent) {
if (!Parent->isAvailable())
IsAvailable = false;
@@ -41,6 +41,8 @@ Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
IsSystem = true;
if (Parent->IsExternC)
IsExternC = true;
+ if (Parent->NoUndeclaredIncludes)
+ NoUndeclaredIncludes = true;
IsMissingRequirement = Parent->IsMissingRequirement;
Parent->SubModuleIndex[Name] = Parent->SubModules.size();
@@ -64,6 +66,8 @@ static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
.Case("blocks", LangOpts.Blocks)
.Case("cplusplus", LangOpts.CPlusPlus)
.Case("cplusplus11", LangOpts.CPlusPlus11)
+ .Case("freestanding", LangOpts.Freestanding)
+ .Case("gnuinlineasm", LangOpts.GNUAsm)
.Case("objc", LangOpts.ObjC1)
.Case("objc_arc", LangOpts.ObjCAutoRefCount)
.Case("opencl", LangOpts.OpenCL)
@@ -179,6 +183,11 @@ bool Module::directlyUses(const Module *Requested) const {
for (auto *Use : Top->DirectUses)
if (Requested->isSubModuleOf(Use))
return true;
+
+ // Anyone is allowed to use our builtin stddef.h and its accompanying module.
+ if (!Requested->Parent && Requested->Name == "_Builtin_stddef_max_align_t")
+ return true;
+
return false;
}
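The two feature names added above become testable from module maps; a
hypothetical example:

    module MyLowLevel {
      requires gnuinlineasm, freestanding
      header "my_low_level.h"
    }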
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index d1e4779e2c72..4675995ea722 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -610,6 +610,86 @@ bool clang::isAllowedClauseForDirective(OpenMPDirectiveKind DKind,
break;
}
break;
+ case OMPD_target_simd:
+ switch (CKind) {
+#define OPENMP_TARGET_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_teams_distribute:
+ switch (CKind) {
+#define OPENMP_TEAMS_DISTRIBUTE_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_teams_distribute_simd:
+ switch (CKind) {
+#define OPENMP_TEAMS_DISTRIBUTE_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_teams_distribute_parallel_for_simd:
+ switch (CKind) {
+#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_teams_distribute_parallel_for:
+ switch (CKind) {
+#define OPENMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_target_teams:
+ switch (CKind) {
+#define OPENMP_TARGET_TEAMS_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_target_teams_distribute:
+ switch (CKind) {
+#define OPENMP_TARGET_TEAMS_DISTRIBUTE_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
+ case OMPD_target_teams_distribute_parallel_for:
+ switch (CKind) {
+#define OPENMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_CLAUSE(Name) \
+ case OMPC_##Name: \
+ return true;
+#include "clang/Basic/OpenMPKinds.def"
+ default:
+ break;
+ }
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_unknown:
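Each new combined directive follows the same pattern: a clause-list macro in
OpenMPKinds.def expands into case labels inside the switch. A sketch of the
expansion for one directive (the clause names are illustrative):

    // Given OpenMPKinds.def entries such as:
    //   OPENMP_TARGET_SIMD_CLAUSE(private)
    //   OPENMP_TARGET_SIMD_CLAUSE(safelen)
    // the #include above expands, inside the switch, to:
    case OMPC_private:
      return true;
    case OMPC_safelen:
      return true;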
@@ -635,8 +715,13 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_distribute_parallel_for ||
DKind == OMPD_distribute_parallel_for_simd ||
DKind == OMPD_distribute_simd ||
- DKind == OMPD_target_parallel_for_simd;
- // TODO add next directives.
+ DKind == OMPD_target_parallel_for_simd || DKind == OMPD_target_simd ||
+ DKind == OMPD_teams_distribute ||
+ DKind == OMPD_teams_distribute_simd ||
+ DKind == OMPD_teams_distribute_parallel_for_simd ||
+ DKind == OMPD_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute ||
+ DKind == OMPD_target_teams_distribute_parallel_for;
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -647,7 +732,10 @@ bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_parallel_for ||
DKind == OMPD_distribute_parallel_for ||
DKind == OMPD_distribute_parallel_for_simd ||
- DKind == OMPD_target_parallel_for_simd;
+ DKind == OMPD_target_parallel_for_simd ||
+ DKind == OMPD_teams_distribute_parallel_for_simd ||
+ DKind == OMPD_teams_distribute_parallel_for ||
+ DKind == OMPD_target_teams_distribute_parallel_for;
// TODO add next directives.
}
@@ -661,15 +749,19 @@ bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_parallel || DKind == OMPD_target_parallel_for ||
DKind == OMPD_distribute_parallel_for ||
DKind == OMPD_distribute_parallel_for_simd ||
- DKind == OMPD_target_parallel_for_simd;
+ DKind == OMPD_target_parallel_for_simd ||
+ DKind == OMPD_teams_distribute_parallel_for ||
+ DKind == OMPD_teams_distribute_parallel_for_simd ||
+ DKind == OMPD_target_teams_distribute_parallel_for;
// TODO add next directives.
}
bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
- // TODO add next directives.
return DKind == OMPD_target || DKind == OMPD_target_parallel ||
DKind == OMPD_target_parallel_for ||
- DKind == OMPD_target_parallel_for_simd;
+ DKind == OMPD_target_parallel_for_simd || DKind == OMPD_target_simd ||
+ DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
+ DKind == OMPD_target_teams_distribute_parallel_for;
}
bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
@@ -677,25 +769,45 @@ bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_exit_data || DKind == OMPD_target_update;
}
+bool clang::isOpenMPNestingTeamsDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_teams || DKind == OMPD_teams_distribute ||
+ DKind == OMPD_teams_distribute_simd ||
+ DKind == OMPD_teams_distribute_parallel_for_simd ||
+ DKind == OMPD_teams_distribute_parallel_for;
+}
+
bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
- return DKind == OMPD_teams; // TODO add next directives.
+ return isOpenMPNestingTeamsDirective(DKind) ||
+ DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
+ DKind == OMPD_target_teams_distribute_parallel_for;
}
bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_simd || DKind == OMPD_for_simd ||
DKind == OMPD_parallel_for_simd || DKind == OMPD_taskloop_simd ||
DKind == OMPD_distribute_parallel_for_simd ||
- DKind == OMPD_distribute_simd;
+ DKind == OMPD_distribute_simd || DKind == OMPD_target_simd ||
+ DKind == OMPD_teams_distribute_simd ||
+ DKind == OMPD_teams_distribute_parallel_for_simd;
// TODO add next directives.
}
-bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
+bool clang::isOpenMPNestingDistributeDirective(OpenMPDirectiveKind Kind) {
return Kind == OMPD_distribute || Kind == OMPD_distribute_parallel_for ||
Kind == OMPD_distribute_parallel_for_simd ||
Kind == OMPD_distribute_simd;
// TODO add next directives.
}
+bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
+ return isOpenMPNestingDistributeDirective(Kind) ||
+ Kind == OMPD_teams_distribute || Kind == OMPD_teams_distribute_simd ||
+ Kind == OMPD_teams_distribute_parallel_for_simd ||
+ Kind == OMPD_teams_distribute_parallel_for ||
+ Kind == OMPD_target_teams_distribute ||
+ Kind == OMPD_target_teams_distribute_parallel_for;
+}
+
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
return Kind == OMPC_private || Kind == OMPC_firstprivate ||
Kind == OMPC_lastprivate || Kind == OMPC_linear ||
@@ -713,5 +825,10 @@ bool clang::isOpenMPTaskingDirective(OpenMPDirectiveKind Kind) {
bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
return Kind == OMPD_distribute_parallel_for ||
Kind == OMPD_distribute_parallel_for_simd ||
- Kind == OMPD_distribute_simd;
+ Kind == OMPD_distribute_simd || Kind == OMPD_teams_distribute ||
+ Kind == OMPD_teams_distribute_simd ||
+ Kind == OMPD_teams_distribute_parallel_for_simd ||
+ Kind == OMPD_teams_distribute_parallel_for ||
+ Kind == OMPD_target_teams_distribute ||
+ Kind == OMPD_target_teams_distribute_parallel_for;
}
diff --git a/lib/Basic/SourceLocation.cpp b/lib/Basic/SourceLocation.cpp
index d254e8620a96..a58d0465a6f4 100644
--- a/lib/Basic/SourceLocation.cpp
+++ b/lib/Basic/SourceLocation.cpp
@@ -14,7 +14,6 @@
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdio>
using namespace clang;
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 1e83b63cf82e..380ca373e69b 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -25,7 +25,6 @@
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstring>
-#include <string>
using namespace clang;
using namespace SrcMgr;
@@ -387,8 +386,6 @@ SourceManager::~SourceManager() {
ContentCacheAlloc.Deallocate(I->second);
}
}
-
- llvm::DeleteContainerSeconds(MacroArgsCacheMap);
}
void SourceManager::clearIDTables() {
@@ -1438,8 +1435,8 @@ SourceManager::getFileCharacteristic(SourceLocation Loc) const {
/// Return the filename or buffer identifier of the buffer the location is in.
/// Note that this name does not respect \#line directives. Use getPresumedLoc
/// for normal clients.
-const char *SourceManager::getBufferName(SourceLocation Loc,
- bool *Invalid) const {
+StringRef SourceManager::getBufferName(SourceLocation Loc,
+ bool *Invalid) const {
if (isInvalid(Loc, Invalid)) return "<invalid loc>";
return getBuffer(getFileID(Loc), Invalid)->getBufferIdentifier();
@@ -1471,7 +1468,7 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
// To get the source name, first consult the FileEntry (if one exists)
// before the MemBuffer as this will avoid unnecessarily paging in the
// MemBuffer.
- const char *Filename;
+ StringRef Filename;
if (C->OrigEntry)
Filename = C->OrigEntry->getName();
else
@@ -1514,7 +1511,7 @@ PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc,
}
}
- return PresumedLoc(Filename, LineNo, ColNo, IncludeLoc);
+ return PresumedLoc(Filename.data(), LineNo, ColNo, IncludeLoc);
}
/// \brief Returns whether the PresumedLoc for a given SourceLocation is
@@ -1785,13 +1782,10 @@ SourceLocation SourceManager::translateLineCol(FileID FID,
/// 0 -> SourceLocation()
/// 100 -> Expanded macro arg location
/// 110 -> SourceLocation()
-void SourceManager::computeMacroArgsCache(MacroArgsMap *&CachePtr,
+void SourceManager::computeMacroArgsCache(MacroArgsMap &MacroArgsCache,
FileID FID) const {
assert(FID.isValid());
- assert(!CachePtr);
- CachePtr = new MacroArgsMap();
- MacroArgsMap &MacroArgsCache = *CachePtr;
// Initially no macro argument chunk is present.
MacroArgsCache.insert(std::make_pair(0, SourceLocation()));
@@ -1941,9 +1935,11 @@ SourceManager::getMacroArgExpandedLocation(SourceLocation Loc) const {
if (FID.isInvalid())
return Loc;
- MacroArgsMap *&MacroArgsCache = MacroArgsCacheMap[FID];
- if (!MacroArgsCache)
- computeMacroArgsCache(MacroArgsCache, FID);
+ std::unique_ptr<MacroArgsMap> &MacroArgsCache = MacroArgsCacheMap[FID];
+ if (!MacroArgsCache) {
+ MacroArgsCache = llvm::make_unique<MacroArgsMap>();
+ computeMacroArgsCache(*MacroArgsCache, FID);
+ }
assert(!MacroArgsCache->empty());
MacroArgsMap::iterator I = MacroArgsCache->upper_bound(Offset);
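The ownership pattern introduced here, in isolation (the exact container type
is assumed): the map owns each per-FileID cache, which is why the explicit
DeleteContainerSeconds call in ~SourceManager above could be dropped.

    llvm::DenseMap<FileID, std::unique_ptr<MacroArgsMap>> MacroArgsCacheMap;

    std::unique_ptr<MacroArgsMap> &Cache = MacroArgsCacheMap[FID];
    if (!Cache) { // first lookup for this FileID constructs the cache once
      Cache = llvm::make_unique<MacroArgsMap>();
      computeMacroArgsCache(*Cache, FID);
    }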
@@ -2096,10 +2092,10 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
// Clear the lookup cache, it depends on a common location.
IsBeforeInTUCache.clear();
- const char *LB = getBuffer(LOffs.first)->getBufferIdentifier();
- const char *RB = getBuffer(ROffs.first)->getBufferIdentifier();
- bool LIsBuiltins = strcmp("<built-in>", LB) == 0;
- bool RIsBuiltins = strcmp("<built-in>", RB) == 0;
+ StringRef LB = getBuffer(LOffs.first)->getBufferIdentifier();
+ StringRef RB = getBuffer(ROffs.first)->getBufferIdentifier();
+ bool LIsBuiltins = LB == "<built-in>";
+ bool RIsBuiltins = RB == "<built-in>";
// Sort built-in before non-built-in.
if (LIsBuiltins || RIsBuiltins) {
if (LIsBuiltins != RIsBuiltins)
@@ -2108,8 +2104,8 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
// lower IDs come first.
return LOffs.first < ROffs.first;
}
- bool LIsAsm = strcmp("<inline asm>", LB) == 0;
- bool RIsAsm = strcmp("<inline asm>", RB) == 0;
+ bool LIsAsm = LB == "<inline asm>";
+ bool RIsAsm = RB == "<inline asm>";
// Sort assembler after built-ins, but before the rest.
if (LIsAsm || RIsAsm) {
if (LIsAsm != RIsAsm)
@@ -2117,8 +2113,8 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
assert(LOffs.first == ROffs.first);
return false;
}
- bool LIsScratch = strcmp("<scratch space>", LB) == 0;
- bool RIsScratch = strcmp("<scratch space>", RB) == 0;
+ bool LIsScratch = LB == "<scratch space>";
+ bool RIsScratch = RB == "<scratch space>";
// Sort scratch after inline asm, but before the rest.
if (LIsScratch || RIsScratch) {
if (LIsScratch != RIsScratch)
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index 92f658a6a37f..b1b01e5f584f 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -27,7 +27,7 @@ static const LangAS::Map DefaultAddrSpaceMap = { 0 };
TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
// Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
// SPARC. These should be overridden by concrete targets as needed.
- BigEndian = true;
+ BigEndian = !T.isLittleEndian();
TLSSupported = true;
NoAsmVariants = false;
HasFloat128 = false;
@@ -39,6 +39,13 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
SuitableAlign = 64;
DefaultAlignForAttributeAligned = 128;
MinGlobalAlign = 0;
+ // From the glibc documentation, on GNU systems, malloc guarantees 16-byte
+ // alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See
+ // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html
+ if (T.isGNUEnvironment())
+ NewAlign = Triple.isArch64Bit() ? 128 : Triple.isArch32Bit() ? 64 : 0;
+ else
+ NewAlign = 0; // Infer from basic type alignment.
HalfWidth = 16;
HalfAlign = 16;
FloatWidth = 32;
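A worked instance of the default chosen above (the triples are illustrative):

    llvm::Triple T64("x86_64-unknown-linux-gnu"); // GNU environment, 64-bit
    unsigned NewAlign64 = T64.isArch64Bit() ? 128 : T64.isArch32Bit() ? 64 : 0;
    // NewAlign64 == 128 bits, i.e. glibc malloc's 16-byte guarantee.

    llvm::Triple T32("i386-unknown-linux-gnu");   // GNU environment, 32-bit
    // The same expression yields 64 bits (8 bytes); non-GNU triples keep
    // NewAlign == 0 and infer the alignment from basic type alignment.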
@@ -70,16 +77,17 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
UseZeroLengthBitfieldAlignment = false;
UseExplicitBitFieldAlignment = true;
ZeroLengthBitfieldBoundary = 0;
- HalfFormat = &llvm::APFloat::IEEEhalf;
- FloatFormat = &llvm::APFloat::IEEEsingle;
- DoubleFormat = &llvm::APFloat::IEEEdouble;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
- Float128Format = &llvm::APFloat::IEEEquad;
+ HalfFormat = &llvm::APFloat::IEEEhalf();
+ FloatFormat = &llvm::APFloat::IEEEsingle();
+ DoubleFormat = &llvm::APFloat::IEEEdouble();
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ Float128Format = &llvm::APFloat::IEEEquad();
MCountName = "mcount";
RegParmMax = 0;
SSERegParmMax = 0;
HasAlignMac68kSupport = false;
HasBuiltinMSVaList = false;
+ IsRenderScriptTarget = false;
// Default to no types using fpret.
RealTypeUsesObjCFPRet = 0;
@@ -219,12 +227,12 @@ TargetInfo::RealType TargetInfo::getRealTypeByWidth(unsigned BitWidth) const {
switch (BitWidth) {
case 96:
- if (&getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended)
+ if (&getLongDoubleFormat() == &llvm::APFloat::x87DoubleExtended())
return LongDouble;
break;
case 128:
- if (&getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble ||
- &getLongDoubleFormat() == &llvm::APFloat::IEEEquad)
+ if (&getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble() ||
+ &getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
return LongDouble;
if (hasFloat128Type())
return Float128;
@@ -301,12 +309,13 @@ void TargetInfo::adjust(const LangOptions &Opts) {
// to generating illegal code that uses 64bit doubles.
if (DoubleWidth != FloatWidth) {
DoubleWidth = DoubleAlign = 64;
- DoubleFormat = &llvm::APFloat::IEEEdouble;
+ DoubleFormat = &llvm::APFloat::IEEEdouble();
}
LongDoubleWidth = LongDoubleAlign = 128;
- assert(PointerWidth == 32 || PointerWidth == 64);
- bool Is32BitArch = PointerWidth == 32;
+ unsigned MaxPointerWidth = getMaxPointerWidth();
+ assert(MaxPointerWidth == 32 || MaxPointerWidth == 64);
+ bool Is32BitArch = MaxPointerWidth == 32;
SizeType = Is32BitArch ? UnsignedInt : UnsignedLong;
PtrDiffType = Is32BitArch ? SignedInt : SignedLong;
IntPtrType = Is32BitArch ? SignedInt : SignedLong;
@@ -314,10 +323,13 @@ void TargetInfo::adjust(const LangOptions &Opts) {
IntMaxType = SignedLongLong;
Int64Type = SignedLong;
- HalfFormat = &llvm::APFloat::IEEEhalf;
- FloatFormat = &llvm::APFloat::IEEEsingle;
- LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ HalfFormat = &llvm::APFloat::IEEEhalf();
+ FloatFormat = &llvm::APFloat::IEEEsingle();
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
+
+ if (Opts.NewAlignOverride)
+ NewAlign = Opts.NewAlignOverride * getCharWidth();
}
bool TargetInfo::initFeatureMap(
@@ -398,8 +410,8 @@ bool TargetInfo::isValidGCCRegisterName(StringRef Name) const {
return false;
}
-StringRef
-TargetInfo::getNormalizedGCCRegisterName(StringRef Name) const {
+StringRef TargetInfo::getNormalizedGCCRegisterName(StringRef Name,
+ bool ReturnCanonical) const {
assert(isValidGCCRegisterName(Name) && "Invalid register passed in");
// Get rid of any register prefix.
@@ -424,7 +436,7 @@ TargetInfo::getNormalizedGCCRegisterName(StringRef Name) const {
// Make sure the register that the additional name is for is within
// the bounds of the register names from above.
if (AN == Name && ARN.RegNum < Names.size())
- return Name;
+ return ReturnCanonical ? Names[ARN.RegNum] : Name;
}
// Now check aliases.
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index be5d4ad8feda..85a83bca002b 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -21,6 +21,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "clang/Basic/Version.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@@ -464,6 +465,8 @@ protected:
Triple.getEnvironmentVersion(Maj, Min, Rev);
this->PlatformName = "android";
this->PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ if (Maj)
+ Builder.defineMacro("__ANDROID_API__", Twine(Maj));
}
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
@@ -805,8 +808,8 @@ public:
this->SizeType = TargetInfo::UnsignedInt;
this->PtrDiffType = TargetInfo::SignedInt;
this->IntPtrType = TargetInfo::SignedInt;
- // RegParmMax is inherited from the underlying architecture
- this->LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ // RegParmMax is inherited from the underlying architecture.
+ this->LongDoubleFormat = &llvm::APFloat::IEEEdouble();
if (Triple.getArch() == llvm::Triple::arm) {
// Handled in ARM's setABI().
} else if (Triple.getArch() == llvm::Triple::x86) {
@@ -822,6 +825,28 @@ public:
}
};
+// Fuchsia Target
+template<typename Target>
+class FuchsiaTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ Builder.defineMacro("__Fuchsia__");
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ // Required by the libc++ locale support.
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+public:
+ FuchsiaTargetInfo(const llvm::Triple &Triple,
+ const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {
+ this->MCountName = "__mcount";
+ }
+};
+
// WebAssembly target
template <typename Target>
class WebAssemblyOSTargetInfo : public OSTargetInfo<Target> {
@@ -869,6 +894,7 @@ class PPCTargetInfo : public TargetInfo {
bool HasHTM;
bool HasBPERMD;
bool HasExtDiv;
+ bool HasP9Vector;
protected:
std::string ABI;
@@ -877,11 +903,10 @@ public:
PPCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), HasVSX(false), HasP8Vector(false),
HasP8Crypto(false), HasDirectMove(false), HasQPX(false), HasHTM(false),
- HasBPERMD(false), HasExtDiv(false) {
- BigEndian = (Triple.getArch() != llvm::Triple::ppc64le);
+ HasBPERMD(false), HasExtDiv(false), HasP9Vector(false) {
SimdDefaultAlign = 128;
LongDoubleWidth = LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble;
+ LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
}
/// \brief Flags for architecture specific defines.
@@ -1121,7 +1146,7 @@ public:
bool useFloat128ManglingForLongDouble() const override {
return LongDoubleWidth == 128 &&
- LongDoubleFormat == &llvm::APFloat::PPCDoubleDouble &&
+ LongDoubleFormat == &llvm::APFloat::PPCDoubleDouble() &&
getTriple().isOSBinFormatELF();
}
};
@@ -1157,6 +1182,8 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasHTM = true;
} else if (Feature == "+float128") {
HasFloat128 = true;
+ } else if (Feature == "+power9-vector") {
+ HasP9Vector = true;
}
// TODO: Finish this list and add an assert that we've handled them
// all.
@@ -1326,6 +1353,8 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__HTM__");
if (HasFloat128)
Builder.defineMacro("__FLOAT128__");
+ if (HasP9Vector)
+ Builder.defineMacro("__POWER9_VECTOR__");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -1355,8 +1384,12 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
}
// Handle explicit options being passed to the compiler here: if we've
-// explicitly turned off vsx and turned on power8-vector or direct-move then
-// go ahead and error since the customer has expressed a somewhat incompatible
+// explicitly turned off vsx and turned on any of:
+// - power8-vector
+// - direct-move
+// - float128
+// - power9-vector
+// then go ahead and error since the customer has expressed an incompatible
// set of options.
static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
const std::vector<std::string> &FeaturesVec) {
@@ -1383,6 +1416,13 @@ static bool ppcUserFeaturesCheck(DiagnosticsEngine &Diags,
<< "-mno-vsx";
return false;
}
+
+ if (std::find(FeaturesVec.begin(), FeaturesVec.end(), "+power9-vector") !=
+ FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mpower9-vector"
+ << "-mno-vsx";
+ return false;
+ }
}
return true;
@@ -1407,6 +1447,7 @@ bool PPCTargetInfo::initFeatureMap(
.Default(false);
Features["qpx"] = (CPU == "a2q");
+ Features["power9-vector"] = (CPU == "pwr9");
Features["crypto"] = llvm::StringSwitch<bool>(CPU)
.Case("ppc64le", true)
.Case("pwr9", true)
@@ -1459,6 +1500,7 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("bpermd", HasBPERMD)
.Case("extdiv", HasExtDiv)
.Case("float128", HasFloat128)
+ .Case("power9-vector", HasP9Vector)
.Default(false);
}
@@ -1468,19 +1510,21 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
// as well. Do the inverse if we're disabling vsx. We'll diagnose any user
// incompatible options.
if (Enabled) {
- if (Name == "direct-move") {
- Features[Name] = Features["vsx"] = true;
- } else if (Name == "power8-vector") {
- Features[Name] = Features["vsx"] = true;
- } else if (Name == "float128") {
+ if (Name == "direct-move" ||
+ Name == "power8-vector" ||
+ Name == "float128" ||
+ Name == "power9-vector") {
+ // power9-vector is really a superset of power8-vector so encode that.
Features[Name] = Features["vsx"] = true;
+ if (Name == "power9-vector")
+ Features["power8-vector"] = true;
} else {
Features[Name] = true;
}
} else {
if (Name == "vsx") {
Features[Name] = Features["direct-move"] = Features["power8-vector"] =
- Features["float128"] = false;
+ Features["float128"] = Features["power9-vector"] = false;
} else {
Features[Name] = false;
}
@@ -1606,7 +1650,7 @@ public:
if (getTriple().getOS() == llvm::Triple::FreeBSD) {
LongDoubleWidth = LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
// PPC32 supports atomics up to 4 bytes.
@@ -1640,7 +1684,7 @@ public:
switch (getTriple().getOS()) {
case llvm::Triple::FreeBSD:
LongDoubleWidth = LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
break;
case llvm::Triple::NetBSD:
IntMaxType = SignedLongLong;
@@ -1711,7 +1755,6 @@ class NVPTXTargetInfo : public TargetInfo {
public:
NVPTXTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple) {
- BigEndian = false;
TLSSupported = false;
LongWidth = LongAlign = 64;
AddrSpaceMap = &NVPTXAddrSpaceMap;
@@ -1749,6 +1792,7 @@ public:
LongLongWidth = HostTarget->getLongLongWidth();
LongLongAlign = HostTarget->getLongLongAlign();
MinGlobalAlign = HostTarget->getMinGlobalAlign();
+ NewAlign = HostTarget->getNewAlign();
DefaultAlignForAttributeAligned =
HostTarget->getDefaultAlignForAttributeAligned();
SizeType = HostTarget->getSizeType();
@@ -1769,6 +1813,12 @@ public:
UseExplicitBitFieldAlignment = HostTarget->useExplicitBitFieldAlignment();
ZeroLengthBitfieldBoundary = HostTarget->getZeroLengthBitfieldBoundary();
+ // This is a bit of a lie, but it controls __GCC_ATOMIC_XXX_LOCK_FREE, and
+ // we need those macros to be identical on host and device, because (among
+ // other things) they affect which standard library classes are defined, and
+ // we need all classes to be defined on both the host and device.
+ MaxAtomicInlineWidth = HostTarget->getMaxAtomicInlineWidth();
+
// Properties intentionally not copied from host:
// - LargeArrayMinWidth, LargeArrayAlign: Not visible across the
// host/device boundary.
@@ -1825,8 +1875,19 @@ public:
return llvm::makeArrayRef(BuiltinInfo,
clang::NVPTX::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override {
+ Features["satom"] = GPU >= CudaArch::SM_60;
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ }
+
bool hasFeature(StringRef Feature) const override {
- return Feature == "ptx" || Feature == "nvptx";
+ return llvm::StringSwitch<bool>(Feature)
+ .Cases("ptx", "nvptx", true)
+ .Case("satom", GPU >= CudaArch::SM_60) // Atomics w/ scope.
+ .Default(false);
}
ArrayRef<const char *> getGCCRegNames() const override;
@@ -1863,16 +1924,16 @@ public:
}
void setSupportedOpenCLOpts() override {
auto &Opts = getSupportedOpenCLOpts();
- Opts.cl_clang_storage_class_specifiers = 1;
- Opts.cl_khr_gl_sharing = 1;
- Opts.cl_khr_icd = 1;
+ Opts.support("cl_clang_storage_class_specifiers");
+ Opts.support("cl_khr_gl_sharing");
+ Opts.support("cl_khr_icd");
- Opts.cl_khr_fp64 = 1;
- Opts.cl_khr_byte_addressable_store = 1;
- Opts.cl_khr_global_int32_base_atomics = 1;
- Opts.cl_khr_global_int32_extended_atomics = 1;
- Opts.cl_khr_local_int32_base_atomics = 1;
- Opts.cl_khr_local_int32_extended_atomics = 1;
+ Opts.support("cl_khr_fp64");
+ Opts.support("cl_khr_byte_addressable_store");
+ Opts.support("cl_khr_global_int32_base_atomics");
+ Opts.support("cl_khr_global_int32_extended_atomics");
+ Opts.support("cl_khr_local_int32_base_atomics");
+ Opts.support("cl_khr_local_int32_extended_atomics");
}
};
@@ -1881,6 +1942,8 @@ const Builtin::Info NVPTXTargetInfo::BuiltinInfo[] = {
{ #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{ #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE },
#include "clang/Basic/BuiltinsNVPTX.def"
};
@@ -1952,26 +2015,28 @@ class AMDGPUTargetInfo final : public TargetInfo {
GK_EVERGREEN_DOUBLE_OPS,
GK_NORTHERN_ISLANDS,
GK_CAYMAN,
- GK_SOUTHERN_ISLANDS,
- GK_SEA_ISLANDS,
- GK_VOLCANIC_ISLANDS
+ GK_GFX6,
+ GK_GFX7,
+ GK_GFX8
} GPU;
bool hasFP64:1;
bool hasFMAF:1;
bool hasLDEXPF:1;
+ bool hasFullSpeedFP32Denorms:1;
static bool isAMDGCN(const llvm::Triple &TT) {
return TT.getArch() == llvm::Triple::amdgcn;
}
public:
- AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple) ,
- GPU(isAMDGCN(Triple) ? GK_SOUTHERN_ISLANDS : GK_R600),
+ GPU(isAMDGCN(Triple) ? GK_GFX6 : GK_R600),
hasFP64(false),
hasFMAF(false),
- hasLDEXPF(false) {
+ hasLDEXPF(false),
+ hasFullSpeedFP32Denorms(false) {
if (getTriple().getArch() == llvm::Triple::amdgcn) {
hasFP64 = true;
hasFMAF = true;
@@ -1999,6 +2064,10 @@ public:
}
}
+ uint64_t getMaxPointerWidth() const override {
+ return getTriple().getArch() == llvm::Triple::amdgcn ? 64 : 32;
+ }
+
const char * getClobbers() const override {
return "";
}
@@ -2025,6 +2094,24 @@ public:
DiagnosticsEngine &Diags, StringRef CPU,
const std::vector<std::string> &FeatureVec) const override;
+ void adjustTargetOptions(const CodeGenOptions &CGOpts,
+ TargetOptions &TargetOpts) const override {
+ bool hasFP32Denormals = false;
+ bool hasFP64Denormals = false;
+ for (auto &I : TargetOpts.FeaturesAsWritten) {
+ if (I == "+fp32-denormals" || I == "-fp32-denormals")
+ hasFP32Denormals = true;
+ if (I == "+fp64-denormals" || I == "-fp64-denormals")
+ hasFP64Denormals = true;
+ }
+ if (!hasFP32Denormals)
+ TargetOpts.Features.push_back((Twine(hasFullSpeedFP32Denorms &&
+ !CGOpts.FlushDenorm ? '+' : '-') + Twine("fp32-denormals")).str());
+ // Never flush fp64 denorms.
+ if (!hasFP64Denormals && hasFP64)
+ TargetOpts.Features.push_back("+fp64-denormals");
+ }
+
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
return llvm::makeArrayRef(BuiltinInfo,
clang::AMDGPU::LastTSBuiltin - Builtin::FirstTSBuiltin);
@@ -2081,23 +2168,32 @@ public:
static GPUKind parseAMDGCNName(StringRef Name) {
return llvm::StringSwitch<GPUKind>(Name)
- .Case("tahiti", GK_SOUTHERN_ISLANDS)
- .Case("pitcairn", GK_SOUTHERN_ISLANDS)
- .Case("verde", GK_SOUTHERN_ISLANDS)
- .Case("oland", GK_SOUTHERN_ISLANDS)
- .Case("hainan", GK_SOUTHERN_ISLANDS)
- .Case("bonaire", GK_SEA_ISLANDS)
- .Case("kabini", GK_SEA_ISLANDS)
- .Case("kaveri", GK_SEA_ISLANDS)
- .Case("hawaii", GK_SEA_ISLANDS)
- .Case("mullins", GK_SEA_ISLANDS)
- .Case("tonga", GK_VOLCANIC_ISLANDS)
- .Case("iceland", GK_VOLCANIC_ISLANDS)
- .Case("carrizo", GK_VOLCANIC_ISLANDS)
- .Case("fiji", GK_VOLCANIC_ISLANDS)
- .Case("stoney", GK_VOLCANIC_ISLANDS)
- .Case("polaris10", GK_VOLCANIC_ISLANDS)
- .Case("polaris11", GK_VOLCANIC_ISLANDS)
+ .Case("tahiti", GK_GFX6)
+ .Case("pitcairn", GK_GFX6)
+ .Case("verde", GK_GFX6)
+ .Case("oland", GK_GFX6)
+ .Case("hainan", GK_GFX6)
+ .Case("bonaire", GK_GFX7)
+ .Case("kabini", GK_GFX7)
+ .Case("kaveri", GK_GFX7)
+ .Case("hawaii", GK_GFX7)
+ .Case("mullins", GK_GFX7)
+ .Case("gfx700", GK_GFX7)
+ .Case("gfx701", GK_GFX7)
+ .Case("gfx702", GK_GFX7)
+ .Case("tonga", GK_GFX8)
+ .Case("iceland", GK_GFX8)
+ .Case("carrizo", GK_GFX8)
+ .Case("fiji", GK_GFX8)
+ .Case("stoney", GK_GFX8)
+ .Case("polaris10", GK_GFX8)
+ .Case("polaris11", GK_GFX8)
+ .Case("gfx800", GK_GFX8)
+ .Case("gfx801", GK_GFX8)
+ .Case("gfx802", GK_GFX8)
+ .Case("gfx803", GK_GFX8)
+ .Case("gfx804", GK_GFX8)
+ .Case("gfx810", GK_GFX8)
.Default(GK_NONE);
}
@@ -2112,26 +2208,34 @@ public:
void setSupportedOpenCLOpts() override {
auto &Opts = getSupportedOpenCLOpts();
- Opts.cl_clang_storage_class_specifiers = 1;
- Opts.cl_khr_icd = 1;
+ Opts.support("cl_clang_storage_class_specifiers");
+ Opts.support("cl_khr_icd");
if (hasFP64)
- Opts.cl_khr_fp64 = 1;
+ Opts.support("cl_khr_fp64");
if (GPU >= GK_EVERGREEN) {
- Opts.cl_khr_byte_addressable_store = 1;
- Opts.cl_khr_global_int32_base_atomics = 1;
- Opts.cl_khr_global_int32_extended_atomics = 1;
- Opts.cl_khr_local_int32_base_atomics = 1;
- Opts.cl_khr_local_int32_extended_atomics = 1;
+ Opts.support("cl_khr_byte_addressable_store");
+ Opts.support("cl_khr_global_int32_base_atomics");
+ Opts.support("cl_khr_global_int32_extended_atomics");
+ Opts.support("cl_khr_local_int32_base_atomics");
+ Opts.support("cl_khr_local_int32_extended_atomics");
}
- if (GPU >= GK_SOUTHERN_ISLANDS) {
- Opts.cl_khr_fp16 = 1;
- Opts.cl_khr_int64_base_atomics = 1;
- Opts.cl_khr_int64_extended_atomics = 1;
- Opts.cl_khr_3d_image_writes = 1;
+ if (GPU >= GK_GFX6) {
+ Opts.support("cl_khr_fp16");
+ Opts.support("cl_khr_int64_base_atomics");
+ Opts.support("cl_khr_int64_extended_atomics");
+ Opts.support("cl_khr_mipmap_image");
+ Opts.support("cl_khr_subgroups");
+ Opts.support("cl_khr_3d_image_writes");
+ Opts.support("cl_amd_media_ops");
+ Opts.support("cl_amd_media_ops2");
}
}
+ LangAS::ID getOpenCLImageAddrSpace() const override {
+ return LangAS::opencl_constant;
+ }
+
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
switch (CC) {
default:
@@ -2141,6 +2245,13 @@ public:
return CCCR_OK;
}
}
+
+ // On the amdgcn target, the null pointer has value 0 in the global,
+ // constant, and generic address spaces, but value ~0 in the private
+ // and local address spaces.
+ uint64_t getNullPointerValue(unsigned AS) const override {
+ return AS != LangAS::opencl_local && AS != 0 ? 0 : ~0;
+ }
};
const Builtin::Info AMDGPUTargetInfo::BuiltinInfo[] = {
@@ -2218,11 +2329,11 @@ bool AMDGPUTargetInfo::initFeatureMap(
CPU = "tahiti";
switch (parseAMDGCNName(CPU)) {
- case GK_SOUTHERN_ISLANDS:
- case GK_SEA_ISLANDS:
+ case GK_GFX6:
+ case GK_GFX7:
break;
- case GK_VOLCANIC_ISLANDS:
+ case GK_GFX8:
Features["s-memrealtime"] = true;
Features["16-bit-insts"] = true;
break;
@@ -2258,17 +2369,25 @@ bool AMDGPUTargetInfo::initFeatureMap(
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeatureVec);
}
-// Namespace for x86 abstract base class
-const Builtin::Info BuiltinInfo[] = {
+const Builtin::Info BuiltinInfoX86[] = {
#define BUILTIN(ID, TYPE, ATTRS) \
{ #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
-#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
- { #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
{ #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE },
+#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
+ { #ID, TYPE, ATTRS, HEADER, LANGS, FEATURE },
#include "clang/Basic/BuiltinsX86.def"
+
+#define BUILTIN(ID, TYPE, ATTRS) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr },
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ { #ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE },
+#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
+ { #ID, TYPE, ATTRS, HEADER, LANGS, FEATURE },
+#include "clang/Basic/BuiltinsX86_64.def"
};
+
static const char* const GCCRegNames[] = {
"ax", "dx", "cx", "bx", "si", "di", "bp", "sp",
"st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
@@ -2287,6 +2406,7 @@ static const char* const GCCRegNames[] = {
"zmm8", "zmm9", "zmm10", "zmm11", "zmm12", "zmm13", "zmm14", "zmm15",
"zmm16", "zmm17", "zmm18", "zmm19", "zmm20", "zmm21", "zmm22", "zmm23",
"zmm24", "zmm25", "zmm26", "zmm27", "zmm28", "zmm29", "zmm30", "zmm31",
+ "k0", "k1", "k2", "k3", "k4", "k5", "k6", "k7",
};
const TargetInfo::AddlRegName AddlRegNames[] = {
@@ -2626,17 +2746,12 @@ class X86TargetInfo : public TargetInfo {
public:
X86TargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
- BigEndian = false;
- LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
}
unsigned getFloatEvalMethod() const override {
// X87 evaluates with 80 bits "long double" precision.
return SSELevel == NoSSE ? 2 : 0;
}
- ArrayRef<Builtin::Info> getTargetBuiltins() const override {
- return llvm::makeArrayRef(BuiltinInfo,
- clang::X86::LastTSBuiltin-Builtin::FirstTSBuiltin);
- }
ArrayRef<const char *> getGCCRegNames() const override {
return llvm::makeArrayRef(GCCRegNames);
}
@@ -2674,6 +2789,40 @@ public:
const char *getClobbers() const override {
return "~{dirflag},~{fpsr},~{flags}";
}
+
+ StringRef getConstraintRegister(const StringRef &Constraint,
+ const StringRef &Expression) const override {
+ StringRef::iterator I, E;
+ for (I = Constraint.begin(), E = Constraint.end(); I != E; ++I) {
+ if (isalpha(*I))
+ break;
+ }
+ if (I == E)
+ return "";
+ switch (*I) {
+ // For the register constraints, return the matching register name
+ case 'a':
+ return "ax";
+ case 'b':
+ return "bx";
+ case 'c':
+ return "cx";
+ case 'd':
+ return "dx";
+ case 'S':
+ return "si";
+ case 'D':
+ return "di";
+ // If the constraint is 'r', return the Expression itself.
+ case 'r':
+ return Expression;
+ default:
+ // Default value if there is no constraint for the register
+ return "";
+ }
+ return "";
+ }
+
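As a hedged illustration of the lookup just added, a self-contained mirror (std::string in place of StringRef; the helper name is hypothetical):

    #include <cassert>
    #include <cctype>
    #include <string>

    // Skip constraint modifiers such as '=', '+', '&', then map the first
    // alphabetic character to a register name, as the method above does.
    std::string constraintRegister(const std::string &C, const std::string &Expr) {
      for (char Ch : C) {
        if (!std::isalpha(static_cast<unsigned char>(Ch)))
          continue;
        switch (Ch) {
        case 'a': return "ax";
        case 'b': return "bx";
        case 'c': return "cx";
        case 'd': return "dx";
        case 'S': return "si";
        case 'D': return "di";
        case 'r': return Expr; // 'r' echoes the expression itself
        default:  return "";
        }
      }
      return "";
    }

    int main() {
      assert(constraintRegister("=a", "x") == "ax");
      assert(constraintRegister("r", "myreg") == "myreg");
    }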
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
static void setSSELevel(llvm::StringMap<bool> &Features, X86SSEEnum Level,
@@ -2797,6 +2946,7 @@ public:
case CC_X86FastCall:
case CC_X86StdCall:
case CC_X86VectorCall:
+ case CC_X86RegCall:
case CC_C:
case CC_Swift:
case CC_X86Pascal:
@@ -2816,7 +2966,7 @@ public:
}
void setSupportedOpenCLOpts() override {
- getSupportedOpenCLOpts().setAll();
+ getSupportedOpenCLOpts().supportAll();
}
};
@@ -3244,6 +3394,12 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
Name == "avx512vbmi" || Name == "avx512ifma") {
if (Enabled)
setSSELevel(Features, AVX512F, Enabled);
+ // Enable BWI instruction if VBMI is being enabled.
+ if (Name == "avx512vbmi" && Enabled)
+ Features["avx512bw"] = true;
+ // Also disable VBMI if BWI is being disabled.
+ if (Name == "avx512bw" && !Enabled)
+ Features["avx512vbmi"] = false;
} else if (Name == "fma") {
if (Enabled)
setSSELevel(Features, AVX, Enabled);
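A hedged sketch of the implication wired up here, with std::map standing in for llvm::StringMap and a hypothetical helper name:

    #include <map>
    #include <string>

    // VBMI requires BWI, so enabling one and disabling the other cascade.
    void setAVX512Feature(std::map<std::string, bool> &F,
                          const std::string &Name, bool On) {
      F[Name] = On;
      if (Name == "avx512vbmi" && On)
        F["avx512bw"] = true;    // VBMI implies BWI
      if (Name == "avx512bw" && !On)
        F["avx512vbmi"] = false; // without BWI, VBMI must go too
    }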
@@ -3891,6 +4047,7 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
case 't': // Any SSE register, when SSE2 is enabled.
case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
case 'm': // Any MMX register, when inter-unit moves enabled.
+ case 'k': // AVX512 arch mask registers: k1-k7.
Info.setAllowsRegister();
return true;
}
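A user-level example of what the new 'k' constraint permits; illustrative only, and it assumes an AVX512F-capable target (e.g. -march=skylake-avx512):

    // Load an immediate into an AVX512 mask register via the 'k' constraint.
    unsigned short loadMask() {
      unsigned short Mask;
      __asm__("kmovw %1, %0" : "=k"(Mask) : "r"(0xF00F));
      return Mask;
    }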
@@ -3911,7 +4068,10 @@ X86TargetInfo::validateAsmConstraint(const char *&Name,
case 'u': // Second from top of floating point stack.
case 'q': // Any register accessible as [r]l: a, b, c, and d.
case 'y': // Any MMX register.
+ case 'v': // Any {X,Y,Z}MM register (Arch & context dependent)
case 'x': // Any SSE register.
+ case 'k': // Any AVX512 mask register (same as Yk; additionally allows k0
+ // for intermediate k-reg operations).
case 'Q': // Any register accessible as [r]h: a, b, c, and d.
case 'R': // "Legacy" registers: ax, bx, cx, dx, di, si, sp, bp.
case 'l': // "Index" registers: any general register that can be used as an
@@ -3945,12 +4105,15 @@ bool X86TargetInfo::validateOperandSize(StringRef Constraint,
unsigned Size) const {
switch (Constraint[0]) {
default: break;
+ case 'k':
+ // The AVX512 mask registers k0-k7 are limited to 64 bits.
case 'y':
return Size <= 64;
case 'f':
case 't':
case 'u':
return Size <= 128;
+ case 'v':
case 'x':
if (SSELevel >= AVX512F)
// 512-bit zmm registers can be used if target supports AVX512F.
@@ -3965,6 +4128,7 @@ bool X86TargetInfo::validateOperandSize(StringRef Constraint,
default: break;
case 'm':
// 'Ym' is synonymous with 'y'.
+ case 'k':
return Size <= 64;
case 'i':
case 't':
@@ -3996,6 +4160,20 @@ X86TargetInfo::convertConstraint(const char *&Constraint) const {
return std::string("{st}");
case 'u': // second from top of floating point stack.
return std::string("{st(1)}"); // second from top of floating point stack.
+ case 'Y':
+ switch (Constraint[1]) {
+ default:
+ // Break from the inner switch and fall through to the outer
+ // default case, which copies the single character into the
+ // return string and continues parsing.
+ break;
+ case 'k':
+ // "^" hints llvm that this is a 2 letter constraint.
+ // "Constraint++" is used to promote the string iterator
+ // to the next constraint.
+ return std::string("^") + std::string(Constraint++, 2);
+ }
+ LLVM_FALLTHROUGH;
default:
return std::string(1, *Constraint);
}
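A standalone sketch of the two-letter branch above (hypothetical helper; the asserts are only for illustration):

    #include <cassert>
    #include <string>

    // Mirrors the "Yk" case: prefix "^" and consume one extra character.
    std::string convertYConstraint(const char *&C) { // C points at 'Y'
      if (C[1] == 'k')
        return std::string("^") + std::string(C++, 2); // "^Yk"; C now at 'k'
      return std::string(1, *C);
    }

    int main() {
      const char *P = "Yk";
      assert(convertYConstraint(P) == "^Yk" && *P == 'k');
    }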
@@ -4055,6 +4233,10 @@ public:
return X86TargetInfo::validateOperandSize(Constraint, Size);
}
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfoX86, clang::X86::LastX86CommonBuiltin -
+ Builtin::FirstTSBuiltin + 1);
+ }
};
class NetBSDI386TargetInfo : public NetBSDTargetInfo<X86_32TargetInfo> {
@@ -4149,7 +4331,7 @@ public:
const TargetOptions &Opts)
: WindowsX86_32TargetInfo(Triple, Opts) {
LongDoubleWidth = LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -4248,7 +4430,7 @@ public:
MCUX86_32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: X86_32TargetInfo(Triple, Opts) {
LongDoubleWidth = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
resetDataLayout("e-m:e-p:32:32-i64:32-f64:32-f128:32-n8:16:32-a:0:32-S32");
WIntType = UnsignedInt;
}
@@ -4380,6 +4562,7 @@ public:
case CC_X86_64Win64:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_X86RegCall:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4410,6 +4593,10 @@ public:
return X86TargetInfo::validateGlobalRegisterVariable(RegName, RegSize,
HasSizeMismatch);
}
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfoX86,
+ X86::LastTSBuiltin - Builtin::FirstTSBuiltin);
+ }
};
// x86-64 Windows target
@@ -4447,6 +4634,8 @@ public:
case CC_X86VectorCall:
case CC_IntelOclBicc:
case CC_X86_64SysV:
+ case CC_Swift:
+ case CC_X86RegCall:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4461,7 +4650,7 @@ public:
const TargetOptions &Opts)
: WindowsX86_64TargetInfo(Triple, Opts) {
LongDoubleWidth = LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -4480,7 +4669,7 @@ public:
// Mingw64 rounds long double size and alignment up to 16 bytes, but sticks
// with x86 FP ops. Weird.
LongDoubleWidth = LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
}
void getTargetDefines(const LangOptions &Opts,
@@ -4641,8 +4830,10 @@ class ARMTargetInfo : public TargetInfo {
DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 64;
const llvm::Triple &T = getTriple();
- // size_t is unsigned long on MachO-derived environments, NetBSD and Bitrig.
+ // size_t is unsigned long on MachO-derived environments, NetBSD,
+ // OpenBSD and Bitrig.
if (T.isOSBinFormatMachO() || T.getOS() == llvm::Triple::NetBSD ||
+ T.getOS() == llvm::Triple::OpenBSD ||
T.getOS() == llvm::Triple::Bitrig)
SizeType = UnsignedLong;
else
@@ -4650,6 +4841,7 @@ class ARMTargetInfo : public TargetInfo {
switch (T.getOS()) {
case llvm::Triple::NetBSD:
+ case llvm::Triple::OpenBSD:
WCharType = SignedInt;
break;
case llvm::Triple::Win32:
@@ -4824,6 +5016,8 @@ class ARMTargetInfo : public TargetInfo {
return "8M_BASE";
case llvm::ARM::AK_ARMV8MMainline:
return "8M_MAIN";
+ case llvm::ARM::AK_ARMV8R:
+ return "8R";
}
}
@@ -4841,14 +5035,13 @@ class ARMTargetInfo : public TargetInfo {
}
public:
- ARMTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts,
- bool IsBigEndian)
+ ARMTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple), FPMath(FP_Default), IsAAPCS(true), LDREX(0),
HW_FP(0) {
- BigEndian = IsBigEndian;
switch (getTriple().getOS()) {
case llvm::Triple::NetBSD:
+ case llvm::Triple::OpenBSD:
PtrDiffType = SignedLong;
break;
default:
@@ -4871,7 +5064,7 @@ public:
// the frontend matches that.
if (Triple.getEnvironment() == llvm::Triple::EABI ||
Triple.getOS() == llvm::Triple::UnknownOS ||
- StringRef(CPU).startswith("cortex-m")) {
+ ArchProfile == llvm::ARM::PK_M) {
setABI("aapcs");
} else if (Triple.isWatchABI()) {
setABI("aapcs16");
@@ -4951,7 +5144,7 @@ public:
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override {
- std::vector<const char*> TargetFeatures;
+ std::vector<StringRef> TargetFeatures;
unsigned Arch = llvm::ARM::parseArch(getTriple().getArchName());
// get default FPU features
@@ -4962,9 +5155,9 @@ public:
unsigned Extensions = llvm::ARM::getDefaultExtensions(CPU, Arch);
llvm::ARM::getExtensionFeatures(Extensions, TargetFeatures);
- for (const char *Feature : TargetFeatures)
+ for (auto Feature : TargetFeatures)
if (Feature[0] == '+')
- Features[Feature+1] = true;
+ Features[Feature.drop_front(1)] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -5200,7 +5393,7 @@ public:
if (SoftFloat)
Builder.defineMacro("__SOFTFP__");
- if (CPU == "xscale")
+ if (ArchKind == llvm::ARM::AK_XSCALE)
Builder.defineMacro("__XSCALE__");
if (isThumb()) {
@@ -5232,6 +5425,8 @@ public:
Builder.defineMacro("__ARM_VFPV3__");
if (FPU & VFP4FPU)
Builder.defineMacro("__ARM_VFPV4__");
+ if (FPU & FPARMV8)
+ Builder.defineMacro("__ARM_FPV5__");
}
// This only gets set when Neon instructions are actually available, unlike
@@ -5479,13 +5674,15 @@ const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
{ #ID, TYPE, ATTRS, nullptr, LANG, nullptr },
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) \
{ #ID, TYPE, ATTRS, HEADER, ALL_LANGUAGES, nullptr },
+#define TARGET_HEADER_BUILTIN(ID, TYPE, ATTRS, HEADER, LANGS, FEATURE) \
+ { #ID, TYPE, ATTRS, HEADER, LANGS, FEATURE },
#include "clang/Basic/BuiltinsARM.def"
};
class ARMleTargetInfo : public ARMTargetInfo {
public:
ARMleTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : ARMTargetInfo(Triple, Opts, /*BigEndian=*/false) {}
+ : ARMTargetInfo(Triple, Opts) {}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
Builder.defineMacro("__ARMEL__");
@@ -5496,7 +5693,7 @@ public:
class ARMbeTargetInfo : public ARMTargetInfo {
public:
ARMbeTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : ARMTargetInfo(Triple, Opts, /*BigEndian=*/true) {}
+ : ARMTargetInfo(Triple, Opts) {}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
Builder.defineMacro("__ARMEB__");
@@ -5697,7 +5894,7 @@ public:
MaxAtomicPromoteWidth = 128;
LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
// {} in inline assembly are neon specifiers, not assembly variant
// specifiers.
@@ -5728,16 +5925,9 @@ public:
}
bool setCPU(const std::string &Name) override {
- bool CPUKnown = llvm::StringSwitch<bool>(Name)
- .Case("generic", true)
- .Cases("cortex-a53", "cortex-a57", "cortex-a72",
- "cortex-a35", "exynos-m1", true)
- .Case("cortex-a73", true)
- .Case("cyclone", true)
- .Case("kryo", true)
- .Case("vulcan", true)
- .Default(false);
- return CPUKnown;
+ return Name == "generic" ||
+ llvm::AArch64::parseCPUArch(Name) !=
+ static_cast<unsigned>(llvm::AArch64::ArchKind::AK_INVALID);
}
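The check now leans on llvm::AArch64::parseCPUArch from llvm/Support/TargetParser.h; a free-standing mirror, with a hypothetical helper name:

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/TargetParser.h"

    // "generic" stays special-cased because the TargetParser does not know it.
    bool knownAArch64CPU(llvm::StringRef Name) {
      return Name == "generic" ||
             llvm::AArch64::parseCPUArch(Name) !=
                 static_cast<unsigned>(llvm::AArch64::ArchKind::AK_INVALID);
    }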
void getTargetDefines(const LangOptions &Opts,
@@ -6014,7 +6204,6 @@ class AArch64leTargetInfo : public AArch64TargetInfo {
public:
AArch64leTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: AArch64TargetInfo(Triple, Opts) {
- BigEndian = false;
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -6064,7 +6253,7 @@ public:
UseSignedCharForObjCBool = false;
LongDoubleWidth = LongDoubleAlign = SuitableAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
TheCXXABI.set(TargetCXXABI::iOS64);
}
@@ -6081,11 +6270,11 @@ class HexagonTargetInfo : public TargetInfo {
static const TargetInfo::GCCRegAlias GCCRegAliases[];
std::string CPU;
bool HasHVX, HasHVXDouble;
+ bool UseLongCalls;
public:
HexagonTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
- BigEndian = false;
// Specify the vector alignment explicitly. For v512x1, the calculated
// alignment would be 512*alignment(i1), which is 512 bytes, instead of
// the required minimum of 64 bytes.
@@ -6105,6 +6294,7 @@ public:
UseBitFieldTypeAlignment = true;
ZeroLengthBitfieldBoundary = 32;
HasHVX = HasHVXDouble = false;
+ UseLongCalls = false;
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
@@ -6139,6 +6329,7 @@ public:
.Case("hexagon", true)
.Case("hvx", HasHVX)
.Case("hvx-double", HasHVXDouble)
+ .Case("long-calls", UseLongCalls)
.Default(false);
}
@@ -6149,6 +6340,9 @@ public:
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override;
+ void setFeatureEnabled(llvm::StringMap<bool> &Features, StringRef Name,
+ bool Enabled) const override;
+
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::CharPtrBuiltinVaList;
}
@@ -6217,6 +6411,17 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
}
}
+bool HexagonTargetInfo::initFeatureMap(llvm::StringMap<bool> &Features,
+ DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+ // Defaults for v60: -hvx, -hvx-double, -long-calls.
+ Features["hvx"] = false;
+ Features["hvx-double"] = false;
+ Features["long-calls"] = false;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+}
+
bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
for (auto &F : Features) {
@@ -6228,21 +6433,27 @@ bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasHVX = HasHVXDouble = true;
else if (F == "-hvx-double")
HasHVXDouble = false;
+
+ if (F == "+long-calls")
+ UseLongCalls = true;
+ else if (F == "-long-calls")
+ UseLongCalls = false;
}
return true;
}
-bool HexagonTargetInfo::initFeatureMap(llvm::StringMap<bool> &Features,
- DiagnosticsEngine &Diags, StringRef CPU,
- const std::vector<std::string> &FeaturesVec) const {
- // Default for v60: -hvx, -hvx-double.
- Features["hvx"] = false;
- Features["hvx-double"] = false;
-
- return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+void HexagonTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name, bool Enabled) const {
+ if (Enabled) {
+ if (Name == "hvx-double")
+ Features["hvx"] = true;
+ } else {
+ if (Name == "hvx")
+ Features["hvx-double"] = false;
+ }
+ Features[Name] = Enabled;
}
-
const char *const HexagonTargetInfo::GCCRegNames[] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
@@ -6472,8 +6683,9 @@ public:
CK_NIAGARA2,
CK_NIAGARA3,
CK_NIAGARA4,
- CK_MYRIAD2_1,
- CK_MYRIAD2_2,
+ CK_MYRIAD2100,
+ CK_MYRIAD2150,
+ CK_MYRIAD2450,
CK_LEON2,
CK_LEON2_AT697E,
CK_LEON2_AT697F,
@@ -6500,8 +6712,9 @@ public:
case CK_SPARCLITE86X:
case CK_SPARCLET:
case CK_TSC701:
- case CK_MYRIAD2_1:
- case CK_MYRIAD2_2:
+ case CK_MYRIAD2100:
+ case CK_MYRIAD2150:
+ case CK_MYRIAD2450:
case CK_LEON2:
case CK_LEON2_AT697E:
case CK_LEON2_AT697F:
@@ -6540,9 +6753,14 @@ public:
.Case("niagara2", CK_NIAGARA2)
.Case("niagara3", CK_NIAGARA3)
.Case("niagara4", CK_NIAGARA4)
- .Case("myriad2", CK_MYRIAD2_1)
- .Case("myriad2.1", CK_MYRIAD2_1)
- .Case("myriad2.2", CK_MYRIAD2_2)
+ .Case("ma2100", CK_MYRIAD2100)
+ .Case("ma2150", CK_MYRIAD2150)
+ .Case("ma2450", CK_MYRIAD2450)
+ // FIXME: the myriad2[.n] spellings are obsolete,
+ // but a grace period is needed to allow updating dependent builds.
+ .Case("myriad2", CK_MYRIAD2100)
+ .Case("myriad2.1", CK_MYRIAD2100)
+ .Case("myriad2.2", CK_MYRIAD2150)
.Case("leon2", CK_LEON2)
.Case("at697e", CK_LEON2_AT697E)
.Case("at697f", CK_LEON2_AT697F)
@@ -6630,7 +6848,10 @@ public:
PtrDiffType = SignedLong;
break;
}
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ // Up to 32 bits are lock-free atomic, but we're willing to do atomic ops
+ // on up to 64 bits.
+ MaxAtomicPromoteWidth = 64;
+ MaxAtomicInlineWidth = 32;
}
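An illustrative consequence of this split for 32-bit SPARC code (a sketch, not part of the patch):

    #include <atomic>

    // With MaxAtomicInlineWidth at 32, 8-byte atomics are no longer assumed
    // lock-free and may lower to __atomic_* libcalls; 4-byte ones stay inline.
    std::atomic<int> Flag;          // lock-free, inlined
    std::atomic<long long> Counter; // may become library calls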
void getTargetDefines(const LangOptions &Opts,
@@ -6651,18 +6872,27 @@ public:
break;
}
if (getTriple().getVendor() == llvm::Triple::Myriad) {
+ std::string MyriadArchValue, Myriad2Value;
+ Builder.defineMacro("__sparc_v8__");
+ Builder.defineMacro("__leon__");
switch (CPU) {
- case CK_MYRIAD2_1:
- Builder.defineMacro("__myriad2", "1");
- Builder.defineMacro("__myriad2__", "1");
+ case CK_MYRIAD2150:
+ MyriadArchValue = "__ma2150";
+ Myriad2Value = "2";
break;
- case CK_MYRIAD2_2:
- Builder.defineMacro("__myriad2", "2");
- Builder.defineMacro("__myriad2__", "2");
+ case CK_MYRIAD2450:
+ MyriadArchValue = "__ma2450";
+ Myriad2Value = "2";
break;
default:
+ MyriadArchValue = "__ma2100";
+ Myriad2Value = "1";
break;
}
+ Builder.defineMacro(MyriadArchValue, "1");
+ Builder.defineMacro(MyriadArchValue+"__", "1");
+ Builder.defineMacro("__myriad2__", Myriad2Value);
+ Builder.defineMacro("__myriad2", Myriad2Value);
}
}
@@ -6677,7 +6907,6 @@ class SparcV8elTargetInfo : public SparcV8TargetInfo {
SparcV8elTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: SparcV8TargetInfo(Triple, Opts) {
resetDataLayout("e-m:e-p:32:32-i64:64-f128:64-n32-S64");
- BigEndian = false;
}
};
@@ -6702,7 +6931,7 @@ public:
// aligned. The SPARCv9 SCD 2.4.1 says 16-byte aligned.
LongDoubleWidth = 128;
LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
@@ -6745,7 +6974,7 @@ public:
PointerWidth = PointerAlign = 64;
LongDoubleWidth = 128;
LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
DefaultAlignForAttributeAligned = 64;
MinGlobalAlign = 16;
resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64");
@@ -6791,9 +7020,13 @@ public:
CPU = Name;
bool CPUKnown = llvm::StringSwitch<bool>(Name)
.Case("z10", true)
+ .Case("arch8", true)
.Case("z196", true)
+ .Case("arch9", true)
.Case("zEC12", true)
+ .Case("arch10", true)
.Case("z13", true)
+ .Case("arch11", true)
.Default(false);
return CPUKnown;
@@ -6802,9 +7035,9 @@ public:
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override {
- if (CPU == "zEC12")
+ if (CPU == "zEC12" || CPU == "arch10")
Features["transactional-execution"] = true;
- if (CPU == "z13") {
+ if (CPU == "z13" || CPU == "arch11") {
Features["transactional-execution"] = true;
Features["vector"] = true;
}
@@ -6912,7 +7145,6 @@ class MSP430TargetInfo : public TargetInfo {
public:
MSP430TargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
- BigEndian = false;
TLSSupported = false;
IntWidth = 16;
IntAlign = 16;
@@ -7018,11 +7250,14 @@ public:
DoubleAlign = 32;
LongDoubleWidth = 32;
LongDoubleAlign = 32;
- FloatFormat = &llvm::APFloat::IEEEsingle;
- DoubleFormat = &llvm::APFloat::IEEEsingle;
- LongDoubleFormat = &llvm::APFloat::IEEEsingle;
- resetDataLayout("E-p:32:32-i8:8:32-i16:16:32-i64:32"
- "-f64:32-v64:32-v128:32-a:0:32-n32");
+ FloatFormat = &llvm::APFloat::IEEEsingle();
+ DoubleFormat = &llvm::APFloat::IEEEsingle();
+ LongDoubleFormat = &llvm::APFloat::IEEEsingle();
+ resetDataLayout("E-p:32:32:32-i1:8:8-i8:8:32-"
+ "i16:16:32-i32:32:32-i64:32:32-"
+ "f32:32:32-f64:32:32-v64:32:32-"
+ "v128:32:32-v256:32:32-v512:32:32-"
+ "v1024:32:32-a0:0:32-n32");
AddrSpaceMap = &TCEOpenCLAddrSpaceMap;
UseAddrSpaceMapMangling = true;
}
@@ -7050,6 +7285,31 @@ public:
}
};
+class TCELETargetInfo : public TCETargetInfo {
+public:
+ TCELETargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : TCETargetInfo(Triple, Opts) {
+ BigEndian = false;
+
+ resetDataLayout("e-p:32:32:32-i1:8:8-i8:8:32-"
+ "i16:16:32-i32:32:32-i64:32:32-"
+ "f32:32:32-f64:32:32-v64:32:32-"
+ "v128:32:32-v256:32:32-v512:32:32-"
+ "v1024:32:32-a0:0:32-n32");
+
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "tcele", Opts);
+ Builder.defineMacro("__TCE__");
+ Builder.defineMacro("__TCE_V1__");
+ Builder.defineMacro("__TCELE__");
+ Builder.defineMacro("__TCELE_V1__");
+ }
+
+};
+
class BPFTargetInfo : public TargetInfo {
public:
BPFTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
@@ -7062,10 +7322,8 @@ public:
Int64Type = SignedLong;
RegParmMax = 5;
if (Triple.getArch() == llvm::Triple::bpfeb) {
- BigEndian = true;
resetDataLayout("E-m:e-p:64:64-i64:64-n32:64-S128");
} else {
- BigEndian = false;
resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128");
}
MaxAtomicPromoteWidth = 64;
@@ -7144,8 +7402,6 @@ public:
IsNan2008(false), IsSingleFloat(false), FloatABI(HardFloat),
DspRev(NoDSP), HasMSA(false), HasFP64(false) {
TheCXXABI.set(TargetCXXABI::GenericMIPS);
- BigEndian = getTriple().getArch() == llvm::Triple::mips ||
- getTriple().getArch() == llvm::Triple::mips64;
setABI((getTriple().getArch() == llvm::Triple::mips ||
getTriple().getArch() == llvm::Triple::mipsel)
@@ -7206,7 +7462,7 @@ public:
void setO32ABITypes() {
Int64Type = SignedLongLong;
IntMaxType = Int64Type;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
LongDoubleWidth = LongDoubleAlign = 64;
LongWidth = LongAlign = 32;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
@@ -7218,10 +7474,10 @@ public:
void setN32N64ABITypes() {
LongDoubleWidth = LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
if (getTriple().getOS() == llvm::Triple::FreeBSD) {
LongDoubleWidth = LongDoubleAlign = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
SuitableAlign = 128;
@@ -7648,7 +7904,6 @@ class PNaClTargetInfo : public TargetInfo {
public:
PNaClTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple) {
- BigEndian = false;
this->LongAlign = 32;
this->LongWidth = 32;
this->PointerAlign = 32;
@@ -7716,7 +7971,6 @@ class Le64TargetInfo : public TargetInfo {
public:
Le64TargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
- BigEndian = false;
NoAsmVariants = true;
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
@@ -7762,7 +8016,6 @@ class WebAssemblyTargetInfo : public TargetInfo {
public:
explicit WebAssemblyTargetInfo(const llvm::Triple &T, const TargetOptions &)
: TargetInfo(T), SIMDLevel(NoSIMD) {
- BigEndian = false;
NoAsmVariants = true;
SuitableAlign = 128;
LargeArrayMinWidth = 128;
@@ -7770,7 +8023,10 @@ public:
SimdDefaultAlign = 128;
SigAtomicType = SignedLong;
LongDoubleWidth = LongDoubleAlign = 128;
- LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
}
protected:
@@ -7889,6 +8145,9 @@ public:
LongAlign = LongWidth = 64;
PointerAlign = PointerWidth = 64;
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntPtrType = SignedLong;
resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128");
}
@@ -7923,7 +8182,6 @@ public:
"SPIR target must use unknown OS");
assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
"SPIR target must use unknown environment type");
- BigEndian = false;
TLSSupported = false;
LongWidth = LongAlign = 64;
AddrSpaceMap = &SPIRAddrSpaceMap;
@@ -7966,7 +8224,7 @@ public:
void setSupportedOpenCLOpts() override {
// Assume all OpenCL extensions and optional core features are supported
// for SPIR since it is a generic target.
- getSupportedOpenCLOpts().setAll();
+ getSupportedOpenCLOpts().supportAll();
}
};
@@ -8007,7 +8265,6 @@ class XCoreTargetInfo : public TargetInfo {
public:
XCoreTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
- BigEndian = false;
NoAsmVariants = true;
LongLongAlign = 32;
SuitableAlign = 32;
@@ -8073,7 +8330,7 @@ public:
: LinuxTargetInfo<X86_32TargetInfo>(Triple, Opts) {
SuitableAlign = 32;
LongDoubleWidth = 64;
- LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
}
};
@@ -8082,7 +8339,7 @@ class AndroidX86_64TargetInfo : public LinuxTargetInfo<X86_64TargetInfo> {
public:
AndroidX86_64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: LinuxTargetInfo<X86_64TargetInfo>(Triple, Opts) {
- LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad();
}
bool useFloat128ManglingForLongDouble() const override {
@@ -8099,6 +8356,7 @@ public:
Triple.getOSName(),
Triple.getEnvironmentName()),
Opts) {
+ IsRenderScriptTarget = true;
LongWidth = LongAlign = 64;
}
void getTargetDefines(const LangOptions &Opts,
@@ -8116,7 +8374,9 @@ public:
: AArch64leTargetInfo(llvm::Triple("aarch64", Triple.getVendorName(),
Triple.getOSName(),
Triple.getEnvironmentName()),
- Opts) {}
+ Opts) {
+ IsRenderScriptTarget = true;
+ }
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -8157,6 +8417,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new CloudABITargetInfo<AArch64leTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<AArch64leTargetInfo>(Triple, Opts);
case llvm::Triple::Linux:
return new LinuxTargetInfo<AArch64leTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
@@ -8169,6 +8431,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
switch (os) {
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<AArch64beTargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<AArch64beTargetInfo>(Triple, Opts);
case llvm::Triple::Linux:
return new LinuxTargetInfo<AArch64beTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
@@ -8189,6 +8453,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -8225,6 +8491,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -8430,6 +8698,9 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
case llvm::Triple::tce:
return new TCETargetInfo(Triple, Opts);
+ case llvm::Triple::tcele:
+ return new TCELETargetInfo(Triple, Opts);
+
case llvm::Triple::x86:
if (Triple.isOSDarwin())
return new DarwinI386TargetInfo(Triple, Opts);
@@ -8455,6 +8726,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new BitrigI386TargetInfo(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::KFreeBSD:
return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Minix:
@@ -8510,6 +8783,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new BitrigX86_64TargetInfo(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ case llvm::Triple::Fuchsia:
+ return new FuchsiaTargetInfo<X86_64TargetInfo>(Triple, Opts);
case llvm::Triple::KFreeBSD:
return new KFreeBSDTargetInfo<X86_64TargetInfo>(Triple, Opts);
case llvm::Triple::Solaris:
@@ -8612,6 +8887,7 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
return nullptr;
Target->setSupportedOpenCLOpts();
+ Target->setOpenCLExtensionOpts();
if (!Target->validateTarget(Diags))
return nullptr;
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index 4fa52b4acce0..a1a67c2bc144 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_391/final/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
index 8ace2b3dc838..50fcb22faf53 100644
--- a/lib/Basic/VirtualFileSystem.cpp
+++ b/lib/Basic/VirtualFileSystem.cpp
@@ -47,7 +47,7 @@ Status::Status(const file_status &Status)
User(Status.getUser()), Group(Status.getGroup()), Size(Status.getSize()),
Type(Status.type()), Perms(Status.permissions()), IsVFSMapped(false) {}
-Status::Status(StringRef Name, UniqueID UID, sys::TimeValue MTime,
+Status::Status(StringRef Name, UniqueID UID, sys::TimePoint<> MTime,
uint32_t User, uint32_t Group, uint64_t Size, file_type Type,
perms Perms)
: Name(Name), UID(UID), MTime(MTime), User(User), Group(Group), Size(Size),
@@ -494,8 +494,8 @@ public:
InMemoryFileSystem::InMemoryFileSystem(bool UseNormalizedPaths)
: Root(new detail::InMemoryDirectory(
- Status("", getNextVirtualUniqueID(), llvm::sys::TimeValue::MinTime(),
- 0, 0, 0, llvm::sys::fs::file_type::directory_file,
+ Status("", getNextVirtualUniqueID(), llvm::sys::TimePoint<>(), 0, 0,
+ 0, llvm::sys::fs::file_type::directory_file,
llvm::sys::fs::perms::all_all))),
UseNormalizedPaths(UseNormalizedPaths) {}
@@ -532,7 +532,7 @@ bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
// End of the path, create a new file.
// FIXME: expose the status details in the interface.
Status Stat(P.str(), getNextVirtualUniqueID(),
- llvm::sys::TimeValue(ModificationTime, 0), 0, 0,
+ llvm::sys::toTimePoint(ModificationTime), 0, 0,
Buffer->getBufferSize(),
llvm::sys::fs::file_type::regular_file,
llvm::sys::fs::all_all);
@@ -545,9 +545,9 @@ bool InMemoryFileSystem::addFile(const Twine &P, time_t ModificationTime,
// FIXME: expose the status details in the interface.
Status Stat(
StringRef(Path.str().begin(), Name.end() - Path.str().begin()),
- getNextVirtualUniqueID(), llvm::sys::TimeValue(ModificationTime, 0),
- 0, 0, Buffer->getBufferSize(),
- llvm::sys::fs::file_type::directory_file, llvm::sys::fs::all_all);
+ getNextVirtualUniqueID(), llvm::sys::toTimePoint(ModificationTime), 0,
+ 0, Buffer->getBufferSize(), llvm::sys::fs::file_type::directory_file,
+ llvm::sys::fs::all_all);
Dir = cast<detail::InMemoryDirectory>(Dir->addChild(
Name, llvm::make_unique<detail::InMemoryDirectory>(std::move(Stat))));
continue;
@@ -801,6 +801,7 @@ public:
/// 'case-sensitive': <boolean, default=true>
/// 'use-external-names': <boolean, default=true>
/// 'overlay-relative': <boolean, default=false>
+/// 'ignore-non-existent-contents': <boolean, default=true>
///
/// Virtual directories are represented as
/// \verbatim
@@ -860,6 +861,14 @@ class RedirectingFileSystem : public vfs::FileSystem {
/// \brief Whether to use the value of 'external-contents' for the
/// names of files. This global value is overridable on a per-file basis.
bool UseExternalNames = true;
+
+ /// \brief Whether to ignore invalid paths obtained via 'external-contents'.
+ /// If 'true', the VFS skips such entries and continues iterating with the
+ /// next one; otherwise iteration stops with an error. This allows YAML
+ /// files to be shared across multiple compiler invocations even when some
+ /// paths named in 'external-contents' no longer exist. This global value
+ /// is overridable on a per-file basis.
+ bool IgnoreNonExistentContents = true;
/// @}
/// Virtual file paths and external files could be canonicalized without "..",
@@ -878,9 +887,6 @@ private:
RedirectingFileSystem(IntrusiveRefCntPtr<FileSystem> ExternalFS)
: ExternalFS(std::move(ExternalFS)) {}
- /// \brief Looks up \p Path in \c Roots.
- ErrorOr<Entry *> lookupPath(const Twine &Path);
-
/// \brief Looks up the path <tt>[Start, End)</tt> in \p From, possibly
/// recursing into the contents of \p From if it is a directory.
ErrorOr<Entry *> lookupPath(sys::path::const_iterator Start,
@@ -890,6 +896,9 @@ private:
ErrorOr<Status> status(const Twine &Path, Entry *E);
public:
+ /// \brief Looks up \p Path in \c Roots.
+ ErrorOr<Entry *> lookupPath(const Twine &Path);
+
/// \brief Parses \p Buffer, which is expected to be in YAML format and
/// returns a virtual file system representing its contents.
static RedirectingFileSystem *
@@ -937,6 +946,10 @@ public:
return ExternalContentsPrefixDir;
}
+ bool ignoreNonExistentContents() const {
+ return IgnoreNonExistentContents;
+ }
+
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dump() const {
for (const std::unique_ptr<Entry> &Root : Roots)
@@ -1060,8 +1073,9 @@ class RedirectingFileSystemParser {
// ... or create a new one
std::unique_ptr<Entry> E = llvm::make_unique<RedirectingDirectoryEntry>(
- Name, Status("", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0,
- 0, file_type::directory_file, sys::fs::all_all));
+ Name,
+ Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
+ 0, 0, 0, file_type::directory_file, sys::fs::all_all));
if (!ParentEntry) { // Add a new root to the overlay
FS->Roots.push_back(std::move(E));
@@ -1262,8 +1276,8 @@ class RedirectingFileSystemParser {
case EK_Directory:
Result = llvm::make_unique<RedirectingDirectoryEntry>(
LastComponent, std::move(EntryArrayContents),
- Status("", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0, 0,
- file_type::directory_file, sys::fs::all_all));
+ Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
+ 0, 0, 0, file_type::directory_file, sys::fs::all_all));
break;
}
@@ -1279,8 +1293,8 @@ class RedirectingFileSystemParser {
Entries.push_back(std::move(Result));
Result = llvm::make_unique<RedirectingDirectoryEntry>(
*I, std::move(Entries),
- Status("", getNextVirtualUniqueID(), sys::TimeValue::now(), 0, 0, 0,
- file_type::directory_file, sys::fs::all_all));
+ Status("", getNextVirtualUniqueID(), std::chrono::system_clock::now(),
+ 0, 0, 0, file_type::directory_file, sys::fs::all_all));
}
return Result;
}
@@ -1301,6 +1315,7 @@ public:
KeyStatusPair("case-sensitive", false),
KeyStatusPair("use-external-names", false),
KeyStatusPair("overlay-relative", false),
+ KeyStatusPair("ignore-non-existent-contents", false),
KeyStatusPair("roots", true),
};
@@ -1359,6 +1374,9 @@ public:
} else if (Key == "use-external-names") {
if (!parseScalarBool(I->getValue(), FS->UseExternalNames))
return false;
+ } else if (Key == "ignore-non-existent-contents") {
+ if (!parseScalarBool(I->getValue(), FS->IgnoreNonExistentContents))
+ return false;
} else {
llvm_unreachable("key missing from Keys");
}
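A hedged sketch of an overlay that opts in to the new key and of feeding it to the parser; the paths are hypothetical and error handling is elided:

    #include "clang/Basic/VirtualFileSystem.h"
    #include "llvm/Support/MemoryBuffer.h"

    llvm::IntrusiveRefCntPtr<clang::vfs::FileSystem> makeTolerantVFS() {
      static const char *Overlay =
          "{ 'version': 0,\n"
          "  'ignore-non-existent-contents': 'true',\n"
          "  'roots': [{ 'name': '/vdir/a.h', 'type': 'file',\n"
          "              'external-contents': '/tmp/maybe-gone/a.h' }]\n"
          "}";
      // Directory iteration over the result skips /vdir/a.h when the external
      // file has disappeared, instead of failing with an error.
      return clang::vfs::getVFSFromYAML(
          llvm::MemoryBuffer::getMemBuffer(Overlay),
          /*DiagHandler=*/nullptr, /*YAMLFilePath=*/"overlay.yaml");
    }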
@@ -1588,6 +1606,47 @@ vfs::getVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
std::move(ExternalFS));
}
+static void getVFSEntries(Entry *SrcE, SmallVectorImpl<StringRef> &Path,
+ SmallVectorImpl<YAMLVFSEntry> &Entries) {
+ auto Kind = SrcE->getKind();
+ if (Kind == EK_Directory) {
+ auto *DE = dyn_cast<RedirectingDirectoryEntry>(SrcE);
+ assert(DE && "Must be a directory");
+ for (std::unique_ptr<Entry> &SubEntry :
+ llvm::make_range(DE->contents_begin(), DE->contents_end())) {
+ Path.push_back(SubEntry->getName());
+ getVFSEntries(SubEntry.get(), Path, Entries);
+ Path.pop_back();
+ }
+ return;
+ }
+
+ assert(Kind == EK_File && "Must be an EK_File");
+ auto *FE = dyn_cast<RedirectingFileEntry>(SrcE);
+ assert(FE && "Must be a file");
+ SmallString<128> VPath;
+ for (auto &Comp : Path)
+ llvm::sys::path::append(VPath, Comp);
+ Entries.push_back(YAMLVFSEntry(VPath.c_str(), FE->getExternalContentsPath()));
+}
+
+void vfs::collectVFSFromYAML(std::unique_ptr<MemoryBuffer> Buffer,
+ SourceMgr::DiagHandlerTy DiagHandler,
+ StringRef YAMLFilePath,
+ SmallVectorImpl<YAMLVFSEntry> &CollectedEntries,
+ void *DiagContext,
+ IntrusiveRefCntPtr<FileSystem> ExternalFS) {
+ RedirectingFileSystem *VFS = RedirectingFileSystem::create(
+ std::move(Buffer), DiagHandler, YAMLFilePath, DiagContext,
+ std::move(ExternalFS));
+ ErrorOr<Entry *> RootE = VFS->lookupPath("/");
+ if (!RootE)
+ return;
+ SmallVector<StringRef, 8> Components;
+ Components.push_back("/");
+ getVFSEntries(*RootE, Components, CollectedEntries);
+}
+
UniqueID vfs::getNextVirtualUniqueID() {
static std::atomic<unsigned> UID;
unsigned ID = ++UID;
@@ -1619,7 +1678,7 @@ public:
JSONWriter(llvm::raw_ostream &OS) : OS(OS) {}
void write(ArrayRef<YAMLVFSEntry> Entries, Optional<bool> UseExternalNames,
Optional<bool> IsCaseSensitive, Optional<bool> IsOverlayRelative,
- StringRef OverlayDir);
+ Optional<bool> IgnoreNonExistentContents, StringRef OverlayDir);
};
}
@@ -1675,6 +1734,7 @@ void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
Optional<bool> UseExternalNames,
Optional<bool> IsCaseSensitive,
Optional<bool> IsOverlayRelative,
+ Optional<bool> IgnoreNonExistentContents,
StringRef OverlayDir) {
using namespace llvm::sys;
@@ -1692,6 +1752,9 @@ void JSONWriter::write(ArrayRef<YAMLVFSEntry> Entries,
OS << " 'overlay-relative': '"
<< (UseOverlayRelative ? "true" : "false") << "',\n";
}
+ if (IgnoreNonExistentContents.hasValue())
+ OS << " 'ignore-non-existent-contents': '"
+ << (IgnoreNonExistentContents.getValue() ? "true" : "false") << "',\n";
OS << " 'roots': [\n";
if (!Entries.empty()) {
@@ -1748,7 +1811,8 @@ void YAMLVFSWriter::write(llvm::raw_ostream &OS) {
});
JSONWriter(OS).write(Mappings, UseExternalNames, IsCaseSensitive,
- IsOverlayRelative, OverlayDir);
+ IsOverlayRelative, IgnoreNonExistentContents,
+ OverlayDir);
}
VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(
@@ -1756,29 +1820,47 @@ VFSFromYamlDirIterImpl::VFSFromYamlDirIterImpl(
RedirectingDirectoryEntry::iterator Begin,
RedirectingDirectoryEntry::iterator End, std::error_code &EC)
: Dir(_Path.str()), FS(FS), Current(Begin), End(End) {
- if (Current != End) {
+ while (Current != End) {
SmallString<128> PathStr(Dir);
llvm::sys::path::append(PathStr, (*Current)->getName());
llvm::ErrorOr<vfs::Status> S = FS.status(PathStr);
- if (S)
+ if (S) {
CurrentEntry = *S;
- else
+ return;
+ }
+ // Skip entries whose external contents no longer exist.
+ if (FS.ignoreNonExistentContents() &&
+ S.getError() == llvm::errc::no_such_file_or_directory) {
+ ++Current;
+ continue;
+ } else {
EC = S.getError();
+ break;
+ }
}
}
std::error_code VFSFromYamlDirIterImpl::increment() {
assert(Current != End && "cannot iterate past end");
- if (++Current != End) {
+ while (++Current != End) {
SmallString<128> PathStr(Dir);
llvm::sys::path::append(PathStr, (*Current)->getName());
llvm::ErrorOr<vfs::Status> S = FS.status(PathStr);
- if (!S)
- return S.getError();
+ if (!S) {
+ // Skip entries whose external contents no longer exist.
+ if (FS.ignoreNonExistentContents() &&
+ S.getError() == llvm::errc::no_such_file_or_directory) {
+ continue;
+ } else {
+ return S.getError();
+ }
+ }
CurrentEntry = *S;
- } else {
- CurrentEntry = Status();
+ break;
}
+
+ if (Current == End)
+ CurrentEntry = Status();
return std::error_code();
}
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 530a7ef560c5..ac31dfdaf3e4 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -142,6 +142,8 @@ namespace swiftcall {
llvm::Type *eltTy,
unsigned elts) const;
+ virtual bool isSwiftErrorInRegister() const = 0;
+
static bool classof(const ABIInfo *info) {
return info->supportsSwift();
}
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 165b6dd55c9b..164e52d7de27 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -14,24 +14,29 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/Bitcode/BitcodeWriterPass.h"
-#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Verifier.h"
+#include "llvm/LTO/LTOBackend.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Object/ModuleSummaryIndexObjectFile.h"
+#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/Timer.h"
@@ -39,7 +44,9 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/Coroutines.h"
#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/ObjCARC.h"
@@ -74,8 +81,7 @@ private:
/// Set LLVM command line options passed through -backend-option.
void setCommandLineOpts();
- void CreatePasses(legacy::PassManager &MPM, legacy::FunctionPassManager &FPM,
- ModuleSummaryIndex *ModuleSummary);
+ void CreatePasses(legacy::PassManager &MPM, legacy::FunctionPassManager &FPM);
/// Generates the TargetMachine.
/// Leaves TM unchanged if it is unable to create the target machine.
@@ -98,7 +104,7 @@ public:
const clang::TargetOptions &TOpts,
const LangOptions &LOpts, Module *M)
: Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
- TheModule(M), CodeGenerationTime("Code Generation Time") {}
+ TheModule(M), CodeGenerationTime("codegen", "Code Generation Time") {}
~EmitAssemblyHelper() {
if (CodeGenOpts.DisableFree)
@@ -109,6 +115,9 @@ public:
void EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS);
+
+ void EmitAssemblyWithNewPassManager(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> OS);
};
// We need this wrapper to access LangOpts and CGOpts from extension functions
@@ -147,17 +156,6 @@ static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder,
PM.add(createAddDiscriminatorsPass());
}
-static void addCleanupPassesForSampleProfiler(
- const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) {
- // instcombine is needed before sample profile annotation because it converts
- // certain function calls to be inlinable. simplifycfg and sroa are needed
- // before instcombine for necessary preparation. E.g. load store is eliminated
- // properly so that instcombine will not introduce unecessary liverange.
- PM.add(createCFGSimplificationPass());
- PM.add(createSROAPass());
- PM.add(createInstructionCombiningPass());
-}
-
static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
PM.add(createBoundsCheckingPass());
@@ -174,8 +172,11 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
Opts.IndirectCalls = CGOpts.SanitizeCoverageIndirectCalls;
Opts.TraceBB = CGOpts.SanitizeCoverageTraceBB;
Opts.TraceCmp = CGOpts.SanitizeCoverageTraceCmp;
+ Opts.TraceDiv = CGOpts.SanitizeCoverageTraceDiv;
+ Opts.TraceGep = CGOpts.SanitizeCoverageTraceGep;
Opts.Use8bitCounters = CGOpts.SanitizeCoverage8bitCounters;
Opts.TracePC = CGOpts.SanitizeCoverageTracePC;
+ Opts.TracePCGuard = CGOpts.SanitizeCoverageTracePCGuard;
PM.add(createSanitizerCoverageModulePass(Opts));
}
@@ -205,7 +206,9 @@ static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- PM.add(createMemorySanitizerPass(CGOpts.SanitizeMemoryTrackOrigins));
+ int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
+ bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
+ PM.add(createMemorySanitizerPass(TrackOrigins, Recover));
// MemorySanitizer inserts complex instrumentation that mostly follows
// the logic of the original code, but operates on "shadow" values.
@@ -263,6 +266,9 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
case CodeGenOptions::Accelerate:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate);
break;
+ case CodeGenOptions::SVML:
+ TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);
+ break;
default:
break;
}
@@ -281,47 +287,33 @@ static void addSymbolRewriterPass(const CodeGenOptions &Opts,
}
void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
- legacy::FunctionPassManager &FPM,
- ModuleSummaryIndex *ModuleSummary) {
+ legacy::FunctionPassManager &FPM) {
+ // Handle disabling of all LLVM passes, where we want to preserve the
+ // internal module before any optimization.
if (CodeGenOpts.DisableLLVMPasses)
return;
- unsigned OptLevel = CodeGenOpts.OptimizationLevel;
- CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
-
- // Handle disabling of LLVM optimization, where we want to preserve the
- // internal module before any optimization.
- if (CodeGenOpts.DisableLLVMOpts) {
- OptLevel = 0;
- Inlining = CodeGenOpts.NoInlining;
- }
-
PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts);
- // Figure out TargetLibraryInfo.
+ // Figure out TargetLibraryInfo. This needs to be added to MPM and FPM
+ // manually (and not via PMBuilder), since some passes (e.g. InstrProfiling)
+ // are inserted before PMBuilder ones; they'd otherwise get the
+ // default-constructed TLI with an unknown target.
Triple TargetTriple(TheModule->getTargetTriple());
- PMBuilder.LibraryInfo = createTLII(TargetTriple, CodeGenOpts);
+ std::unique_ptr<TargetLibraryInfoImpl> TLII(
+ createTLII(TargetTriple, CodeGenOpts));
- switch (Inlining) {
- case CodeGenOptions::NoInlining:
- break;
- case CodeGenOptions::NormalInlining:
- case CodeGenOptions::OnlyHintInlining: {
- PMBuilder.Inliner =
- createFunctionInliningPass(OptLevel, CodeGenOpts.OptimizeSize);
- break;
- }
- case CodeGenOptions::OnlyAlwaysInlining:
- // Respect always_inline.
- if (OptLevel == 0)
- // Do not insert lifetime intrinsics at -O0.
- PMBuilder.Inliner = createAlwaysInlinerPass(false);
- else
- PMBuilder.Inliner = createAlwaysInlinerPass();
- break;
+ // At -O0 and -O1 we run only the always inliner, which is more efficient.
+ // At higher optimization levels we run the normal inliner.
+ if (CodeGenOpts.OptimizationLevel <= 1) {
+ bool InsertLifetimeIntrinsics = CodeGenOpts.OptimizationLevel != 0;
+ PMBuilder.Inliner = createAlwaysInlinerLegacyPass(InsertLifetimeIntrinsics);
+ } else {
+ PMBuilder.Inliner = createFunctionInliningPass(
+ CodeGenOpts.OptimizationLevel, CodeGenOpts.OptimizeSize);
}
- PMBuilder.OptLevel = OptLevel;
+ PMBuilder.OptLevel = CodeGenOpts.OptimizationLevel;
PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
PMBuilder.BBVectorize = CodeGenOpts.VectorizeBB;
PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
@@ -333,13 +325,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
- // If we are performing a ThinLTO importing compile, invoke the LTO
- // pipeline and pass down the in-memory module summary index.
- if (ModuleSummary) {
- PMBuilder.ModuleSummary = ModuleSummary;
- PMBuilder.populateThinLTOPassManager(MPM);
- return;
- }
+ MPM.add(new TargetLibraryInfoWrapperPass(*TLII));
// Add target-specific passes that need to run as early as possible.
if (TM)
@@ -413,6 +399,9 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addDataFlowSanitizerPass);
}
+ if (LangOpts.CoroutinesTS)
+ addCoroutinePassesToExtensionPoints(PMBuilder);
+
if (LangOpts.Sanitize.hasOneOf(SanitizerKind::Efficiency)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addEfficiencySanitizerPass);
@@ -421,6 +410,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
}
// Set up the per-function pass manager.
+ FPM.add(new TargetLibraryInfoWrapperPass(*TLII));
if (CodeGenOpts.VerifyModule)
FPM.add(createVerifierPass());
@@ -453,20 +443,17 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
MPM.add(createInstrProfilingLegacyPass(Options));
}
if (CodeGenOpts.hasProfileIRInstr()) {
+ PMBuilder.EnablePGOInstrGen = true;
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
- PMBuilder.PGOInstrGen = "default.profraw";
+ PMBuilder.PGOInstrGen = "default_%m.profraw";
}
if (CodeGenOpts.hasProfileIRUse())
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
- if (!CodeGenOpts.SampleProfileFile.empty()) {
- MPM.add(createPruneEHPass());
- MPM.add(createSampleProfileLoaderPass(CodeGenOpts.SampleProfileFile));
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addCleanupPassesForSampleProfiler);
- }
+ if (!CodeGenOpts.SampleProfileFile.empty())
+ PMBuilder.PGOSampleUse = CodeGenOpts.SampleProfileFile;
PMBuilder.populateFunctionPassManager(FPM);
PMBuilder.populateModulePassManager(MPM);
@@ -517,15 +504,14 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
// Keep this synced with the equivalent code in tools/driver/cc1as_main.cpp.
llvm::Optional<llvm::Reloc::Model> RM;
- if (CodeGenOpts.RelocationModel == "static") {
- RM = llvm::Reloc::Static;
- } else if (CodeGenOpts.RelocationModel == "pic") {
- RM = llvm::Reloc::PIC_;
- } else {
- assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
- "Invalid PIC model!");
- RM = llvm::Reloc::DynamicNoPIC;
- }
+ RM = llvm::StringSwitch<llvm::Reloc::Model>(CodeGenOpts.RelocationModel)
+ .Case("static", llvm::Reloc::Static)
+ .Case("pic", llvm::Reloc::PIC_)
+ .Case("ropi", llvm::Reloc::ROPI)
+ .Case("rwpi", llvm::Reloc::RWPI)
+ .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
+ .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC);
+ assert(RM.hasValue() && "invalid PIC model!");
CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
switch (CodeGenOpts.OptimizationLevel) {
@@ -536,9 +522,6 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
llvm::TargetOptions Options;
- if (!TargetOpts.Reciprocals.empty())
- Options.Reciprocals = TargetRecip(TargetOpts.Reciprocals);
-
Options.ThreadModel =
llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
.Case("posix", llvm::ThreadModel::POSIX)
@@ -601,8 +584,11 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
Options.MCOptions.MCIncrementalLinkerCompatible =
CodeGenOpts.IncrementalLinkerCompatible;
+ Options.MCOptions.MCPIECopyRelocations =
+ CodeGenOpts.PIECopyRelocations;
Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
+ Options.MCOptions.PreserveAsmComments = CodeGenOpts.PreserveAsmComments;
Options.MCOptions.ABIName = TargetOpts.ABI;
TM.reset(TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr,
@@ -659,26 +645,6 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
if (TM)
TheModule->setDataLayout(TM->createDataLayout());
- // If we are performing a ThinLTO importing compile, load the function
- // index into memory and pass it into CreatePasses, which will add it
- // to the PassManagerBuilder and invoke LTO passes.
- std::unique_ptr<ModuleSummaryIndex> ModuleSummary;
- if (!CodeGenOpts.ThinLTOIndexFile.empty()) {
- ErrorOr<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
- llvm::getModuleSummaryIndexForFile(
- CodeGenOpts.ThinLTOIndexFile, [&](const DiagnosticInfo &DI) {
- TheModule->getContext().diagnose(DI);
- });
- if (std::error_code EC = IndexOrErr.getError()) {
- std::string Error = EC.message();
- errs() << "Error loading index file '" << CodeGenOpts.ThinLTOIndexFile
- << "': " << Error << "\n";
- return;
- }
- ModuleSummary = std::move(IndexOrErr.get());
- assert(ModuleSummary && "Expected non-empty module summary index");
- }
-
legacy::PassManager PerModulePasses;
PerModulePasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
@@ -687,7 +653,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
PerFunctionPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
- CreatePasses(PerModulePasses, PerFunctionPasses, ModuleSummary.get());
+ CreatePasses(PerModulePasses, PerFunctionPasses);
legacy::PassManager CodeGenPasses;
CodeGenPasses.add(
@@ -740,15 +706,245 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
}
}
+static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
+ switch (Opts.OptimizationLevel) {
+ default:
+ llvm_unreachable("Invalid optimization level!");
+
+ case 1:
+ return PassBuilder::O1;
+
+ case 2:
+ switch (Opts.OptimizeSize) {
+ default:
+ llvm_unreachable("Invalide optimization level for size!");
+
+ case 0:
+ return PassBuilder::O2;
+
+ case 1:
+ return PassBuilder::Os;
+
+ case 2:
+ return PassBuilder::Oz;
+ }
+
+ case 3:
+ return PassBuilder::O3;
+ }
+}
+
+/// A clean version of `EmitAssembly` that uses the new pass manager.
+///
+/// Not all features are currently supported in this system, but where
+/// necessary it falls back to the legacy pass manager to at least provide
+/// basic functionality.
+///
+/// This API is planned to have its functionality finished and then to
+/// replace `EmitAssembly` once the new pass manager becomes the default.
+void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr);
+ setCommandLineOpts();
+
+ // The new pass manager always makes a target machine available to passes
+ // during construction.
+ CreateTargetMachine(/*MustCreateTM*/ true);
+ if (!TM)
+ // This will already be diagnosed, just bail.
+ return;
+ TheModule->setDataLayout(TM->createDataLayout());
+
+ PassBuilder PB(TM.get());
+
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+
+ // Register the AA manager first so that our version is the one used.
+ FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
+
+ // Register all the basic analyses with the managers.
+ PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+
+ ModulePassManager MPM;
+
+ if (!CodeGenOpts.DisableLLVMPasses) {
+ if (CodeGenOpts.OptimizationLevel == 0) {
+ // Build a minimal pipeline based on the semantics required by Clang,
+ // which is just that always-inlining occurs.
+ MPM.addPass(AlwaysInlinerPass());
+ } else {
+ // Otherwise, use the default pass pipeline. We also have to map our
+ // optimization levels into one of the distinct levels used to configure
+ // the pipeline.
+ PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+
+ MPM = PB.buildPerModuleDefaultPipeline(Level);
+ }
+ }
+
+ // FIXME: We still use the legacy pass manager to do code generation. We
+ // create that pass manager here and use it as needed below.
+ legacy::PassManager CodeGenPasses;
+ bool NeedCodeGen = false;
+
+ // Append any output we need to the pass manager.
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
+ CodeGenOpts.EmitSummaryIndex,
+ CodeGenOpts.EmitSummaryIndex));
+ break;
+
+ case Backend_EmitLL:
+ MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists));
+ break;
+
+ case Backend_EmitAssembly:
+ case Backend_EmitMCNull:
+ case Backend_EmitObj:
+ NeedCodeGen = true;
+ CodeGenPasses.add(
+ createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
+ if (!AddEmitPasses(CodeGenPasses, Action, *OS))
+ // FIXME: Should we handle this error differently?
+ return;
+ break;
+ }
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ // Now that we have all of the passes ready, run them.
+ {
+ PrettyStackTraceString CrashInfo("Optimizer");
+ MPM.run(*TheModule, MAM);
+ }
+
+ // Now if needed, run the legacy PM for codegen.
+ if (NeedCodeGen) {
+ PrettyStackTraceString CrashInfo("Code generation");
+ CodeGenPasses.run(*TheModule);
+ }
+}
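For reference, the new-pass-manager boilerplate above reduces to a small amount of standalone code. A minimal sketch, assuming only LLVM's PassBuilder API as it exists at the time of this patch (not part of the change itself):

#include "llvm/Passes/PassBuilder.h"

using namespace llvm;

// Build and run the default -O2 pipeline over a module. All four analysis
// managers must be registered and cross-wired before any pass runs, exactly
// as EmitAssemblyWithNewPassManager does above.
static void runDefaultPipeline(Module &M, TargetMachine *TM) {
  PassBuilder PB(TM);

  LoopAnalysisManager LAM;
  FunctionAnalysisManager FAM;
  CGSCCAnalysisManager CGAM;
  ModuleAnalysisManager MAM;

  PB.registerModuleAnalyses(MAM);
  PB.registerCGSCCAnalyses(CGAM);
  PB.registerFunctionAnalyses(FAM);
  PB.registerLoopAnalyses(LAM);
  PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

  ModulePassManager MPM = PB.buildPerModuleDefaultPipeline(PassBuilder::O2);
  MPM.run(M, MAM);
}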
+
+static void runThinLTOBackend(const CodeGenOptions &CGOpts, Module *M,
+ std::unique_ptr<raw_pwrite_stream> OS) {
+ // If we are performing a ThinLTO importing compile, load the function index
+ // into memory and pass it into thinBackend, which will run the function
+ // importer and invoke LTO passes.
+ Expected<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
+ llvm::getModuleSummaryIndexForFile(CGOpts.ThinLTOIndexFile);
+ if (!IndexOrErr) {
+ logAllUnhandledErrors(IndexOrErr.takeError(), errs(),
+ "Error loading index file '" +
+ CGOpts.ThinLTOIndexFile + "': ");
+ return;
+ }
+ std::unique_ptr<ModuleSummaryIndex> CombinedIndex = std::move(*IndexOrErr);
+
+ StringMap<std::map<GlobalValue::GUID, GlobalValueSummary *>>
+ ModuleToDefinedGVSummaries;
+ CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+ // We can simply import the values mentioned in the combined index, since
+ // we should only invoke this using the individual indexes written out
+ // via a WriteIndexesThinBackend.
+ FunctionImporter::ImportMapTy ImportList;
+ for (auto &GlobalList : *CombinedIndex) {
+ auto GUID = GlobalList.first;
+ assert(GlobalList.second.size() == 1 &&
+ "Expected individual combined index to have one summary per GUID");
+ auto &Summary = GlobalList.second[0];
+ // Skip the summaries for the importing module. These are included to
+ // e.g. record required linkage changes.
+ if (Summary->modulePath() == M->getModuleIdentifier())
+ continue;
+ // It doesn't matter what value we plug into the map; thinBackend just
+ // needs an entry to provoke importing.
+ ImportList[Summary->modulePath()][GUID] = 1;
+ }
+
+ std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
+ MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;
+
+ for (auto &I : ImportList) {
+ ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
+ llvm::MemoryBuffer::getFile(I.first());
+ if (!MBOrErr) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': " << MBOrErr.getError().message() << "\n";
+ return;
+ }
+
+ Expected<std::vector<BitcodeModule>> BMsOrErr =
+ getBitcodeModuleList(**MBOrErr);
+ if (!BMsOrErr) {
+ handleAllErrors(BMsOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': " << EIB.message() << '\n';
+ });
+ return;
+ }
+
+ // The bitcode file may contain multiple modules; we want the one with a
+ // summary.
+ bool FoundModule = false;
+ for (BitcodeModule &BM : *BMsOrErr) {
+ Expected<bool> HasSummary = BM.hasSummary();
+ if (HasSummary && *HasSummary) {
+ ModuleMap.insert({I.first(), BM});
+ FoundModule = true;
+ break;
+ }
+ }
+ if (!FoundModule) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': Could not find module summary\n";
+ return;
+ }
+
+ OwnedImports.push_back(std::move(*MBOrErr));
+ }
+ auto AddStream = [&](size_t Task) {
+ return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
+ };
+ lto::Config Conf;
+ if (Error E = thinBackend(
+ Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
+ ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
+ handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
+ errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
+ });
+ }
+}
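To place runThinLTOBackend in context, the distributed ThinLTO flow it serves looks roughly like this (the -flto=thin and -fthinlto-index= flags are real; file names and the thin-link mechanism are illustrative):

//   1. clang -O2 -flto=thin -c foo.c -o foo.o        // bitcode + summary
//   2. A thin-link step (typically driven by the linker) writes a
//      per-module index file such as foo.o.thinlto.bc.
//   3. clang -O2 -x ir foo.o -fthinlto-index=foo.o.thinlto.bc \
//            -c -o foo.native.o
//
// Step 3 is the path above: the index is loaded, the import list is rebuilt
// from it, each exporting module's bitcode is opened, and everything is
// handed to thinBackend.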
+
void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const CodeGenOptions &CGOpts,
const clang::TargetOptions &TOpts,
const LangOptions &LOpts, const llvm::DataLayout &TDesc,
Module *M, BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
+ if (!CGOpts.ThinLTOIndexFile.empty()) {
+ runThinLTOBackend(CGOpts, M, std::move(OS));
+ return;
+ }
+
EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
- AsmHelper.EmitAssembly(Action, std::move(OS));
+ if (CGOpts.ExperimentalNewPassManager)
+ AsmHelper.EmitAssemblyWithNewPassManager(Action, std::move(OS));
+ else
+ AsmHelper.EmitAssembly(Action, std::move(OS));
// Verify clang's TargetInfo DataLayout against the LLVM TargetMachine's
// DataLayout.
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 7b747c138303..9287e46127bd 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -11,13 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
@@ -308,7 +307,8 @@ static RValue emitAtomicLibcall(CodeGenFunction &CGF,
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
- return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
+ auto callee = CGCallee::forDirect(fn);
+ return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}
/// Does a store of the given IR type modify the full expected width?
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index e3658ab9b762..b250b9a32b18 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -16,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/CallSite.h"
@@ -77,63 +78,63 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
ASTContext &C = CGM.getContext();
- llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
- llvm::Type *i8p = nullptr;
+ llvm::IntegerType *ulong =
+ cast<llvm::IntegerType>(CGM.getTypes().ConvertType(C.UnsignedLongTy));
+ llvm::PointerType *i8p = nullptr;
if (CGM.getLangOpts().OpenCL)
i8p =
llvm::Type::getInt8PtrTy(
CGM.getLLVMContext(), C.getTargetAddressSpace(LangAS::opencl_constant));
else
- i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ i8p = CGM.VoidPtrTy;
- SmallVector<llvm::Constant*, 6> elements;
+ ConstantInitBuilder builder(CGM);
+ auto elements = builder.beginStruct();
// reserved
- elements.push_back(llvm::ConstantInt::get(ulong, 0));
+ elements.addInt(ulong, 0);
// Size
// FIXME: What is the right way to say this doesn't fit? We should give
// a user diagnostic in that case. A better fix would be to change the
// API to size_t.
- elements.push_back(llvm::ConstantInt::get(ulong,
- blockInfo.BlockSize.getQuantity()));
+ elements.addInt(ulong, blockInfo.BlockSize.getQuantity());
// Optional copy/dispose helpers.
if (blockInfo.NeedsCopyDispose) {
// copy_func_helper_decl
- elements.push_back(buildCopyHelper(CGM, blockInfo));
+ elements.add(buildCopyHelper(CGM, blockInfo));
// destroy_func_decl
- elements.push_back(buildDisposeHelper(CGM, blockInfo));
+ elements.add(buildDisposeHelper(CGM, blockInfo));
}
// Signature. Mandatory ObjC-style method descriptor @encode sequence.
std::string typeAtEncoding =
CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
- elements.push_back(llvm::ConstantExpr::getBitCast(
+ elements.add(llvm::ConstantExpr::getBitCast(
CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
// GC layout.
if (C.getLangOpts().ObjC1) {
if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
- elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ elements.add(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
else
- elements.push_back(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo));
+ elements.add(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo));
}
else
- elements.push_back(llvm::Constant::getNullValue(i8p));
-
- llvm::Constant *init = llvm::ConstantStruct::getAnon(elements);
+ elements.addNullPointer(i8p);
unsigned AddrSpace = 0;
if (C.getLangOpts().OpenCL)
AddrSpace = C.getTargetAddressSpace(LangAS::opencl_constant);
+
llvm::GlobalVariable *global =
- new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- init, "__block_descriptor_tmp", nullptr,
- llvm::GlobalValue::NotThreadLocal,
- AddrSpace);
+ elements.finishAndCreateGlobal("__block_descriptor_tmp",
+ CGM.getPointerAlign(),
+ /*constant*/ true,
+ llvm::GlobalValue::InternalLinkage,
+ AddrSpace);
return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
}
@@ -188,9 +189,6 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
};
*/
-/// The number of fields in a block header.
-const unsigned BlockHeaderSize = 5;
-
namespace {
/// A chunk of data that we actually have to capture in the block.
struct BlockLayoutChunk {
@@ -199,13 +197,14 @@ namespace {
Qualifiers::ObjCLifetime Lifetime;
const BlockDecl::Capture *Capture; // null for 'this'
llvm::Type *Type;
+ QualType FieldType;
BlockLayoutChunk(CharUnits align, CharUnits size,
Qualifiers::ObjCLifetime lifetime,
const BlockDecl::Capture *capture,
- llvm::Type *type)
+ llvm::Type *type, QualType fieldType)
: Alignment(align), Size(size), Lifetime(lifetime),
- Capture(capture), Type(type) {}
+ Capture(capture), Type(type), FieldType(fieldType) {}
/// Tell the block info that this chunk has the given field index.
void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
@@ -213,8 +212,8 @@ namespace {
info.CXXThisIndex = index;
info.CXXThisOffset = offset;
} else {
- info.Captures.insert({Capture->getVariable(),
- CGBlockInfo::Capture::makeIndex(index, offset)});
+ auto C = CGBlockInfo::Capture::makeIndex(index, offset, FieldType);
+ info.Captures.insert({Capture->getVariable(), C});
}
}
};
@@ -317,8 +316,6 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
elementTypes.push_back(CGM.IntTy);
elementTypes.push_back(CGM.VoidPtrTy);
elementTypes.push_back(CGM.getBlockDescriptorType());
-
- assert(elementTypes.size() == BlockHeaderSize);
}
/// Compute the layout of the given block. Attempts to lay the block
@@ -363,7 +360,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
Qualifiers::OCL_None,
- nullptr, llvmType));
+ nullptr, llvmType, thisType));
}
// Next, all the block captures.
@@ -380,7 +377,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
Qualifiers::OCL_None, &CI,
- CGM.VoidPtrTy));
+ CGM.VoidPtrTy, variable->getType()));
continue;
}
@@ -436,6 +433,14 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
}
QualType VT = variable->getType();
+
+ // If the variable is captured by an enclosing block or lambda expression,
+ // use the type of the capture field.
+ if (CGF->BlockInfo && CI.isNested())
+ VT = CGF->BlockInfo->getCapture(variable).fieldType();
+ else if (auto *FD = CGF->LambdaCaptureFields.lookup(variable))
+ VT = FD->getType();
+
CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
@@ -444,7 +449,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::Type *llvmType =
CGM.getTypes().ConvertTypeForMem(VT);
- layout.push_back(BlockLayoutChunk(align, size, lifetime, &CI, llvmType));
+ layout.push_back(
+ BlockLayoutChunk(align, size, lifetime, &CI, llvmType, VT));
}
// If that was everything, we're done here.
@@ -680,6 +686,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If the block has no captures, we won't have a pre-computed
// layout for it.
if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr))
+ return Block;
CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
computeBlockInfo(CGM, this, blockInfo);
blockInfo.BlockExpression = blockExpr;
@@ -775,7 +783,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Ignore constant captures.
if (capture.isConstant()) continue;
- QualType type = variable->getType();
+ QualType type = capture.fieldType();
// This will be a [[type]]*, except that a byref entry will just be
// an i8**.
@@ -965,25 +973,24 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
const BlockPointerType *BPT =
E->getCallee()->getType()->getAs<BlockPointerType>();
- llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ llvm::Value *BlockPtr = EmitScalarExpr(E->getCallee());
// Get a pointer to the generic block literal.
llvm::Type *BlockLiteralTy =
llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
// Bitcast the callee to a block literal.
- llvm::Value *BlockLiteral =
- Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+ BlockPtr = Builder.CreateBitCast(BlockPtr, BlockLiteralTy, "block.literal");
// Get the function pointer from the literal.
llvm::Value *FuncPtr =
- Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockLiteral, 3);
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, 3);
- BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
+ BlockPtr = Builder.CreateBitCast(BlockPtr, VoidPtrTy);
// Add the block literal.
CallArgList Args;
- Args.add(RValue::get(BlockLiteral), getContext().VoidPtrTy);
+ Args.add(RValue::get(BlockPtr), getContext().VoidPtrTy);
QualType FnType = BPT->getPointeeType();
@@ -1003,8 +1010,11 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+ // Prepare the callee.
+ CGCallee Callee(CGCalleeInfo(), Func);
+
// And call the block.
- return EmitCall(FnInfo, Func, ReturnValue, Args);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
@@ -1033,18 +1043,27 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
variable->getName());
}
- if (auto refType = variable->getType()->getAs<ReferenceType>()) {
+ if (auto refType = capture.fieldType()->getAs<ReferenceType>())
addr = EmitLoadOfReference(addr, refType);
- }
return addr;
}
+void CodeGenModule::setAddrOfGlobalBlock(const BlockExpr *BE,
+ llvm::Constant *Addr) {
+ bool Ok = EmittedGlobalBlocks.insert(std::make_pair(BE, Addr)).second;
+ (void)Ok;
+ assert(Ok && "Trying to replace an already-existing global block!");
+}
+
llvm::Constant *
-CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
- const char *name) {
- CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name);
- blockInfo.BlockExpression = blockExpr;
+CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *BE,
+ StringRef Name) {
+ if (llvm::Constant *Block = getAddrOfGlobalBlockIfEmitted(BE))
+ return Block;
+
+ CGBlockInfo blockInfo(BE->getBlockDecl(), Name);
+ blockInfo.BlockExpression = BE;
// Compute information about the layout, etc., of this block.
computeBlockInfo(*this, nullptr, blockInfo);
@@ -1067,43 +1086,46 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
const CGBlockInfo &blockInfo,
llvm::Constant *blockFn) {
assert(blockInfo.CanBeGlobal);
+ // Callers should detect this case on their own: calling this function
+ // generally requires computing layout information, which is a waste of time
+ // if we've already emitted this block.
+ assert(!CGM.getAddrOfGlobalBlockIfEmitted(blockInfo.BlockExpression) &&
+ "Refusing to re-emit a global block.");
// Generate the constants for the block literal initializer.
- llvm::Constant *fields[BlockHeaderSize];
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct();
// isa
- fields[0] = CGM.getNSConcreteGlobalBlock();
+ fields.add(CGM.getNSConcreteGlobalBlock());
// __flags
BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
- fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
+ fields.addInt(CGM.IntTy, flags.getBitMask());
// Reserved
- fields[2] = llvm::Constant::getNullValue(CGM.IntTy);
+ fields.addInt(CGM.IntTy, 0);
// Function
- fields[3] = blockFn;
+ fields.add(blockFn);
// Descriptor
- fields[4] = buildBlockDescriptor(CGM, blockInfo);
-
- llvm::Constant *init = llvm::ConstantStruct::getAnon(fields);
+ fields.add(buildBlockDescriptor(CGM, blockInfo));
- llvm::GlobalVariable *literal =
- new llvm::GlobalVariable(CGM.getModule(),
- init->getType(),
- /*constant*/ true,
- llvm::GlobalVariable::InternalLinkage,
- init,
- "__block_literal_global");
- literal->setAlignment(blockInfo.BlockAlign.getQuantity());
+ llvm::Constant *literal =
+ fields.finishAndCreateGlobal("__block_literal_global",
+ blockInfo.BlockAlign,
+ /*constant*/ true);
// Return a constant of the appropriately-casted type.
- llvm::Type *requiredType =
+ llvm::Type *RequiredType =
CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
- return llvm::ConstantExpr::getBitCast(literal, requiredType);
+ llvm::Constant *Result =
+ llvm::ConstantExpr::getBitCast(literal, RequiredType);
+ CGM.setAddrOfGlobalBlock(blockInfo.BlockExpression, Result);
+ return Result;
}
void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
@@ -1939,7 +1961,7 @@ static T *buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo,
generator.CopyHelper = buildByrefCopyHelper(CGM, byrefInfo, generator);
generator.DisposeHelper = buildByrefDisposeHelper(CGM, byrefInfo, generator);
- T *copy = new (CGM.getContext()) T(std::move(generator));
+ T *copy = new (CGM.getContext()) T(std::forward<T>(generator));
CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
return copy;
}
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 1edabef4ec74..80e255f75417 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -25,10 +25,8 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/IR/Module.h"
namespace llvm {
-class Module;
class Constant;
class Function;
class GlobalValue;
@@ -40,10 +38,8 @@ class LLVMContext;
}
namespace clang {
-
namespace CodeGen {
-class CodeGenModule;
class CGBlockInfo;
// Flags stored in __block variables.
@@ -163,6 +159,11 @@ public:
EHScopeStack::stable_iterator Cleanup;
CharUnits::QuantityType Offset;
+ /// Type of the capture field. Normally, this is identical to the type of
+ /// the capture's VarDecl, but can be different if there is an enclosing
+ /// lambda.
+ QualType FieldType;
+
public:
bool isIndex() const { return (Data & 1) != 0; }
bool isConstant() const { return !isIndex(); }
@@ -189,10 +190,16 @@ public:
return reinterpret_cast<llvm::Value*>(Data);
}
- static Capture makeIndex(unsigned index, CharUnits offset) {
+ QualType fieldType() const {
+ return FieldType;
+ }
+
+ static Capture makeIndex(unsigned index, CharUnits offset,
+ QualType FieldType) {
Capture v;
v.Data = (index << 1) | 1;
v.Offset = offset.getQuantity();
+ v.FieldType = FieldType;
return v;
}
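An illustrative source pattern (not part of the patch) showing why FieldType can differ from the VarDecl's type: a block nested in a lambda captures the lambda's field, not the original variable.

// Compile with: clang++ -fblocks (and a blocks runtime to actually run it).
void f() {
  int x = 0;
  auto l = [&x] {        // the lambda's capture field has type 'int &'
    ^{ return x; }();    // the block's capture field is therefore 'int &',
  };                     // while x's VarDecl type is plain 'int'
  l();
}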
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index 027435d7c599..42f9a428bb3a 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -102,11 +102,6 @@ public:
assert(Addr->getType()->getPointerElementType() == Ty);
return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
}
- llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
- bool IsVolatile,
- const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr, Align.getQuantity(), IsVolatile, Name);
- }
// Note that we intentionally hide the CreateStore APIs that don't
// take an alignment.
@@ -124,19 +119,6 @@ public:
// FIXME: these "default-aligned" APIs should be removed,
// but I don't feel like fixing all the builtin code right now.
- llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
- const llvm::Twine &Name = "") {
- return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
- }
- llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
- const char *Name) {
- return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
- }
- llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr, bool IsVolatile,
- const llvm::Twine &Name = "") {
- return CGBuilderBaseTy::CreateLoad(Addr, IsVolatile, Name);
- }
-
llvm::StoreInst *CreateDefaultAlignedStore(llvm::Value *Val,
llvm::Value *Addr,
bool IsVolatile = false) {
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a5fc53113bdc..43ca74761fbd 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -11,13 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGOpenCLRuntime.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/Analysis/Analyses/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
@@ -35,8 +37,8 @@ using namespace llvm;
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
-llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
- unsigned BuiltinID) {
+llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
// Get the name, skip over the __builtin_ prefix (if necessary).
@@ -302,10 +304,10 @@ static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
return CGF.Builder.CreateICmpSLT(V, Zero);
}
-static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
- const CallExpr *E, llvm::Value *calleeValue) {
- return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
- ReturnValueSlot(), Fn);
+static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
+ const CallExpr *E, llvm::Constant *calleeValue) {
+ CGCallee callee = CGCallee::forDirect(calleeValue, FD);
+ return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
@@ -462,6 +464,119 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
return Builder.CreateCall(F, {EmitScalarExpr(E), CI});
}
+// Many MSVC builtins exist on both x64 and ARM; to avoid repeating code, we
+// handle them here.
+enum class CodeGenFunction::MSVCIntrin {
+ _BitScanForward,
+ _BitScanReverse,
+ _InterlockedAnd,
+ _InterlockedDecrement,
+ _InterlockedExchange,
+ _InterlockedExchangeAdd,
+ _InterlockedExchangeSub,
+ _InterlockedIncrement,
+ _InterlockedOr,
+ _InterlockedXor,
+};
+
+Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ case MSVCIntrin::_BitScanForward:
+ case MSVCIntrin::_BitScanReverse: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(1));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ llvm::Type *IndexType =
+ EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
+ llvm::Type *ResultType = ConvertType(E->getType());
+
+ Value *ArgZero = llvm::Constant::getNullValue(ArgType);
+ Value *ResZero = llvm::Constant::getNullValue(ResultType);
+ Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
+
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
+
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
+ BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ResZero, Begin);
+
+ Builder.SetInsertPoint(NotZero);
+ Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
+
+ if (BuiltinID == MSVCIntrin::_BitScanForward) {
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+ Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
+ ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
+ Builder.CreateStore(ZeroCount, IndexAddress, false);
+ } else {
+ unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
+ Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+ Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
+ ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
+ Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
+ Builder.CreateStore(Index, IndexAddress, false);
+ }
+ Builder.CreateBr(End);
+ Result->addIncoming(ResOne, NotZero);
+
+ Builder.SetInsertPoint(End);
+ return Result;
+ }
+ case MSVCIntrin::_InterlockedAnd:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
+ case MSVCIntrin::_InterlockedExchange:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
+ case MSVCIntrin::_InterlockedExchangeAdd:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
+ case MSVCIntrin::_InterlockedExchangeSub:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
+ case MSVCIntrin::_InterlockedOr:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
+ case MSVCIntrin::_InterlockedXor:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
+
+ case MSVCIntrin::_InterlockedDecrement: {
+ llvm::Type *IntTy = ConvertType(E->getType());
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Sub,
+ EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
+ }
+ case MSVCIntrin::_InterlockedIncrement: {
+ llvm::Type *IntTy = ConvertType(E->getType());
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add,
+ EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
+ }
+ }
+ llvm_unreachable("Incorrect MSVC intrinsic!");
+}
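A behavioral sketch of what two of the handlers above compute, written as plain C++ for reference (helper names are hypothetical; this is not the emitted code):

// _BitScanForward: returns 0 when Mask is 0; otherwise stores the index of
// the lowest set bit (cttz) and returns 1. _BitScanReverse instead stores
// (width - 1) - ctlz, the index of the highest set bit.
unsigned char bitScanForward(unsigned long *Index, unsigned long Mask) {
  if (Mask == 0)
    return 0;
  *Index = __builtin_ctzl(Mask);
  return 1;
}

// _InterlockedIncrement: a sequentially consistent atomic add that returns
// the *new* value; hence the extra CreateAdd after the atomicrmw above.
long interlockedIncrement(volatile long *Addend) {
  return __atomic_add_fetch(Addend, 1, __ATOMIC_SEQ_CST);
}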
+
+namespace {
+// ARC cleanup for __builtin_os_log_format
+struct CallObjCArcUse final : EHScopeStack::Cleanup {
+ CallObjCArcUse(llvm::Value *object) : object(object) {}
+ llvm::Value *object;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitARCIntrinsicUse(object);
+ }
+};
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue) {
@@ -681,6 +796,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__popcnt16:
+ case Builtin::BI__popcnt:
+ case Builtin::BI__popcnt64:
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
@@ -696,6 +814,58 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI_rotr8:
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64: {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Value *Shift = EmitScalarExpr(E->getArg(1));
+
+ llvm::Type *ArgType = Val->getType();
+ Shift = Builder.CreateIntCast(Shift, ArgType, false);
+ unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
+ Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth);
+ Value *ArgZero = llvm::Constant::getNullValue(ArgType);
+
+ Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
+ Shift = Builder.CreateAnd(Shift, Mask);
+ Value *LeftShift = Builder.CreateSub(ArgTypeSize, Shift);
+
+ Value *RightShifted = Builder.CreateLShr(Val, Shift);
+ Value *LeftShifted = Builder.CreateShl(Val, LeftShift);
+ Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted);
+
+ Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero);
+ Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated);
+ return RValue::get(Result);
+ }
+ case Builtin::BI_rotl8:
+ case Builtin::BI_rotl16:
+ case Builtin::BI_rotl:
+ case Builtin::BI_lrotl:
+ case Builtin::BI_rotl64: {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Value *Shift = EmitScalarExpr(E->getArg(1));
+
+ llvm::Type *ArgType = Val->getType();
+ Shift = Builder.CreateIntCast(Shift, ArgType, false);
+ unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
+ Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth);
+ Value *ArgZero = llvm::Constant::getNullValue(ArgType);
+
+ Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
+ Shift = Builder.CreateAnd(Shift, Mask);
+ Value *RightShift = Builder.CreateSub(ArgTypeSize, Shift);
+
+ Value *LeftShifted = Builder.CreateShl(Val, Shift);
+ Value *RightShifted = Builder.CreateLShr(Val, RightShift);
+ Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted);
+
+ Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero);
+ Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated);
+ return RValue::get(Result);
+ }
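The shift-and-select sequence above is the standard UB-free rotate expansion. Its semantics in plain C++ (illustrative helper, 32-bit case):

// Rotate left by a masked amount. Masking keeps the shift amount in range,
// and the early return mirrors the CreateSelect above: for Shift == 0 the
// expression 'Val >> (32 - Shift)' would shift by the full width, which is
// undefined.
unsigned rotl32(unsigned Val, unsigned Shift) {
  Shift &= 31; // Mask = ArgWidth - 1
  if (Shift == 0)
    return Val;
  return (Val << Shift) | (Val >> (32 - Shift));
}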
case Builtin::BI__builtin_unpredictable: {
// Always return the argument of __builtin_unpredictable. LLVM does not
// handle this builtin. Metadata for this builtin should be added directly
@@ -789,8 +959,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
SanitizerScope SanScope(this);
EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
SanitizerKind::Unreachable),
- "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
- None);
+ SanitizerHandler::BuiltinUnreachable,
+ EmitCheckSourceLocation(E->getExprLoc()), None);
} else
Builder.CreateUnreachable();
@@ -851,6 +1021,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
+ case Builtin::BIfinite:
+ case Builtin::BI__finite:
+ case Builtin::BIfinitef:
+ case Builtin::BI__finitef:
+ case Builtin::BIfinitel:
+ case Builtin::BI__finitel:
case Builtin::BI__builtin_isinf:
case Builtin::BI__builtin_isfinite: {
// isinf(x) --> fabs(x) == infinity
@@ -963,8 +1139,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_alloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
- return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
+ const TargetInfo &TI = getContext().getTargetInfo();
+ // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
+ unsigned SuitableAlignmentInBytes =
+ CGM.getContext()
+ .toCharUnitsFromBits(TI.getSuitableAlign())
+ .getQuantity();
+ AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
+ AI->setAlignment(SuitableAlignmentInBytes);
+ return RValue::get(AI);
+ }
+
+ case Builtin::BI__builtin_alloca_with_align: {
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
+ auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
+ unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
+ unsigned AlignmentInBytes =
+ CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity();
+ AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
+ AI->setAlignment(AlignmentInBytes);
+ return RValue::get(AI);
}
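Usage sketch for the two forms handled above. Note that the second operand of __builtin_alloca_with_align is given in bits, which is why the code converts with toCharUnitsFromBits:

void demo(unsigned n) {
  // Aligned to the target's __BIGGEST_ALIGNMENT__:
  void *p = __builtin_alloca(n);
  // Explicitly aligned: 64 bits here, i.e. 8 bytes after conversion.
  void *q = __builtin_alloca_with_align(n, 64);
  (void)p;
  (void)q;
}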
+
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -1085,6 +1282,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
+ case Builtin::BI_ReturnAddress: {
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
+ return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
+ }
case Builtin::BI__builtin_frame_address: {
Value *Depth =
CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
@@ -1390,7 +1591,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
- return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ return EmitCall(FuncInfo, CGCallee::forDirect(Func),
+ ReturnValueSlot(), Args);
}
case Builtin::BI__atomic_test_and_set: {
@@ -1905,12 +2107,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const CallExpr *Call = cast<CallExpr>(E->getArg(0));
const Expr *Chain = E->getArg(1);
return EmitCall(Call->getCallee()->getType(),
- EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
- Call->getCalleeDecl(), EmitScalarExpr(Chain));
+ EmitCallee(Call->getCallee()), Call, ReturnValue,
+ EmitScalarExpr(Chain));
}
+ case Builtin::BI_InterlockedExchange8:
+ case Builtin::BI_InterlockedExchange16:
case Builtin::BI_InterlockedExchange:
case Builtin::BI_InterlockedExchangePointer:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
case Builtin::BI_InterlockedCompareExchangePointer: {
llvm::Type *RTy;
llvm::IntegerType *IntType =
@@ -1938,7 +2143,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
0),
RTy));
}
- case Builtin::BI_InterlockedCompareExchange: {
+ case Builtin::BI_InterlockedCompareExchange8:
+ case Builtin::BI_InterlockedCompareExchange16:
+ case Builtin::BI_InterlockedCompareExchange:
+ case Builtin::BI_InterlockedCompareExchange64: {
AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(2)),
@@ -1948,42 +2156,44 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CXI->setVolatile(true);
return RValue::get(Builder.CreateExtractValue(CXI, 0));
}
- case Builtin::BI_InterlockedIncrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- RMWI->setVolatile(true);
- return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1)));
- }
- case Builtin::BI_InterlockedDecrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Sub,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- RMWI->setVolatile(true);
- return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1)));
- }
- case Builtin::BI_InterlockedExchangeAdd: {
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1)),
- llvm::AtomicOrdering::SequentiallyConsistent);
- RMWI->setVolatile(true);
- return RValue::get(RMWI);
- }
+ case Builtin::BI_InterlockedIncrement16:
+ case Builtin::BI_InterlockedIncrement:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
+ case Builtin::BI_InterlockedDecrement16:
+ case Builtin::BI_InterlockedDecrement:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
+ case Builtin::BI_InterlockedAnd8:
+ case Builtin::BI_InterlockedAnd16:
+ case Builtin::BI_InterlockedAnd:
+ return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
+ case Builtin::BI_InterlockedExchangeAdd8:
+ case Builtin::BI_InterlockedExchangeAdd16:
+ case Builtin::BI_InterlockedExchangeAdd:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
+ case Builtin::BI_InterlockedExchangeSub8:
+ case Builtin::BI_InterlockedExchangeSub16:
+ case Builtin::BI_InterlockedExchangeSub:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
+ case Builtin::BI_InterlockedOr8:
+ case Builtin::BI_InterlockedOr16:
+ case Builtin::BI_InterlockedOr:
+ return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
+ case Builtin::BI_InterlockedXor8:
+ case Builtin::BI_InterlockedXor16:
+ case Builtin::BI_InterlockedXor:
+ return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
case Builtin::BI__readfsdword: {
llvm::Type *IntTy = ConvertType(E->getType());
Value *IntToPtr =
Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
llvm::PointerType::get(IntTy, 257));
- LoadInst *Load =
- Builder.CreateDefaultAlignedLoad(IntToPtr, /*isVolatile=*/true);
+ LoadInst *Load = Builder.CreateAlignedLoad(
+ IntTy, IntToPtr, getContext().getTypeAlignInChars(E->getType()));
+ Load->setVolatile(true);
return RValue::get(Load);
}
@@ -2004,7 +2214,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Attribute::ReturnsTwice);
llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
- "_setjmpex", ReturnsTwiceAttr);
+ "_setjmpex", ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *Buf = Builder.CreateBitOrPointerCast(
EmitScalarExpr(E->getArg(0)), Int8PtrTy);
llvm::Value *FrameAddr =
@@ -2029,7 +2239,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy};
llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true),
- "_setjmp3", ReturnsTwiceAttr);
+ "_setjmp3", ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *Count = ConstantInt::get(IntTy, 0);
llvm::Value *Args[] = {Buf, Count};
CS = EmitRuntimeCallOrInvoke(SetJmp3, Args);
@@ -2037,7 +2247,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
llvm::Constant *SetJmp = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
- "_setjmp", ReturnsTwiceAttr);
+ "_setjmp", ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *FrameAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
ConstantInt::get(Int32Ty, 0));
@@ -2057,11 +2267,47 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
}
+ case Builtin::BI__builtin_coro_size: {
+ auto & Context = getContext();
+ auto SizeTy = Context.getSizeType();
+ auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
+ Value *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
+ return RValue::get(Builder.CreateCall(F));
+ }
+
+ case Builtin::BI__builtin_coro_id:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
+ case Builtin::BI__builtin_coro_promise:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
+ case Builtin::BI__builtin_coro_resume:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
+ case Builtin::BI__builtin_coro_frame:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
+ case Builtin::BI__builtin_coro_free:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
+ case Builtin::BI__builtin_coro_destroy:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
+ case Builtin::BI__builtin_coro_done:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
+ case Builtin::BI__builtin_coro_alloc:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
+ case Builtin::BI__builtin_coro_begin:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
+ case Builtin::BI__builtin_coro_end:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
+ case Builtin::BI__builtin_coro_suspend:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
+ case Builtin::BI__builtin_coro_param:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
+
// OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe: {
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Type of the generic packet parameter.
unsigned GenericAS =
@@ -2075,19 +2321,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
: "__write_pipe_2";
// Creating a generic function type to be able to call with any builtin or
// user defined type.
- llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy};
+ llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(FTy, Name), {Arg0, BCast}));
+ return RValue::get(
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, BCast, PacketSize, PacketAlign}));
} else {
assert(4 == E->getNumArgs() &&
"Illegal number of parameters to pipe function");
const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
: "__write_pipe_4";
- llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy};
+ llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
+ Int32Ty, Int32Ty};
Value *Arg2 = EmitScalarExpr(E->getArg(2)),
*Arg3 = EmitScalarExpr(E->getArg(3));
llvm::FunctionType *FTy = llvm::FunctionType::get(
@@ -2098,7 +2346,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (Arg2->getType() != Int32Ty)
Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1, Arg2, BCast}));
+ CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
}
}
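The runtime entry points targeted above now receive the packet geometry explicitly. Assumed C-level prototypes matching the ArgTys arrays (the real pipe and reserve-id arguments use their own pointer types; void * stands in here):

extern "C" int __read_pipe_2(void *Pipe, void *Ptr,
                             int PacketSize, int PacketAlign);
extern "C" int __read_pipe_4(void *Pipe, void *ReserveId, int Index,
                             void *Ptr, int PacketSize, int PacketAlign);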
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
@@ -2127,9 +2376,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
- llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty};
+ llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
// We know the second argument is an integer type, but we may need to cast
@@ -2137,7 +2389,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (Arg1->getType() != Int32Ty)
Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1}));
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
// functions
@@ -2163,15 +2416,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
- llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType()};
+ llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1}));
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
case Builtin::BIget_pipe_num_packets:
@@ -2184,12 +2441,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Building the generic function prototype.
Value *Arg0 = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgTys[] = {Arg0->getType()};
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
+ llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0}));
+ return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
@@ -2258,17 +2518,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
std::vector<llvm::Type *> ArgTys = {QueueTy, IntTy, RangeTy, Int8PtrTy,
IntTy};
- // Add the variadics.
- for (unsigned I = 4; I < NumArgs; ++I) {
- llvm::Value *ArgSize = EmitScalarExpr(E->getArg(I));
- unsigned TypeSizeInBytes =
- getContext()
- .getTypeSizeInChars(E->getArg(I)->getType())
- .getQuantity();
- Args.push_back(TypeSizeInBytes < 4
- ? Builder.CreateZExt(ArgSize, Int32Ty)
- : ArgSize);
- }
+ // Each of the following arguments specifies the size of the corresponding
+ // argument passed to the enqueued block.
+ for (unsigned I = 4/*Position of the first size arg*/; I < NumArgs; ++I)
+ Args.push_back(
+ Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy));
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), true);
@@ -2279,29 +2533,26 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Any calls now have event arguments passed.
if (NumArgs >= 7) {
llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
- unsigned AS4 =
- E->getArg(4)->getType()->isArrayType()
- ? E->getArg(4)->getType().getAddressSpace()
- : E->getArg(4)->getType()->getPointeeType().getAddressSpace();
- llvm::Type *EventPtrAS4Ty =
- EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS4));
- unsigned AS5 =
- E->getArg(5)->getType()->getPointeeType().getAddressSpace();
- llvm::Type *EventPtrAS5Ty =
- EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS5));
-
- llvm::Value *NumEvents = EmitScalarExpr(E->getArg(3));
+ llvm::Type *EventPtrTy = EventTy->getPointerTo(
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
+
+ llvm::Value *NumEvents =
+ Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
llvm::Value *EventList =
E->getArg(4)->getType()->isArrayType()
? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
: EmitScalarExpr(E->getArg(4));
llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5));
+ // Convert to generic address space.
+ EventList = Builder.CreatePointerCast(EventList, EventPtrTy);
+ ClkEvent = Builder.CreatePointerCast(ClkEvent, EventPtrTy);
llvm::Value *Block =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(6)), Int8PtrTy);
- std::vector<llvm::Type *> ArgTys = {
- QueueTy, Int32Ty, RangeTy, Int32Ty,
- EventPtrAS4Ty, EventPtrAS5Ty, Int8PtrTy};
+ std::vector<llvm::Type *> ArgTys = {QueueTy, Int32Ty, RangeTy,
+ Int32Ty, EventPtrTy, EventPtrTy,
+ Int8PtrTy};
+
std::vector<llvm::Value *> Args = {Queue, Flags, Range, NumEvents,
EventList, ClkEvent, Block};
@@ -2320,17 +2571,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_vaargs";
- // Add the variadics.
- for (unsigned I = 7; I < NumArgs; ++I) {
- llvm::Value *ArgSize = EmitScalarExpr(E->getArg(I));
- unsigned TypeSizeInBytes =
- getContext()
- .getTypeSizeInChars(E->getArg(I)->getType())
- .getQuantity();
- Args.push_back(TypeSizeInBytes < 4
- ? Builder.CreateZExt(ArgSize, Int32Ty)
- : ArgSize);
- }
+ // Each of the following arguments specifies the size of the corresponding
+ // argument passed to the enqueued block.
+ for (unsigned I = 7/*Position of the first size arg*/; I < NumArgs; ++I)
+ Args.push_back(
+ Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy));
+
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), true);
return RValue::get(
@@ -2373,6 +2619,76 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Fall through - it's already mapped to the intrinsic by GCCBuiltin.
break;
}
+ case Builtin::BI__builtin_os_log_format: {
+ assert(E->getNumArgs() >= 2 &&
+ "__builtin_os_log_format takes at least 2 arguments");
+ analyze_os_log::OSLogBufferLayout Layout;
+ analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout);
+ Address BufAddr = EmitPointerWithAlignment(E->getArg(0));
+ // Ignore argument 1, the format string. It is not currently used.
+ CharUnits Offset;
+ Builder.CreateStore(
+ Builder.getInt8(Layout.getSummaryByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
+ Builder.CreateStore(
+ Builder.getInt8(Layout.getNumArgsByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
+
+ llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
+ for (const auto &Item : Layout.Items) {
+ Builder.CreateStore(
+ Builder.getInt8(Item.getDescriptorByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
+ Builder.CreateStore(
+ Builder.getInt8(Item.getSizeByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
+ Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset);
+ if (const Expr *TheExpr = Item.getExpr()) {
+ Addr = Builder.CreateElementBitCast(
+ Addr, ConvertTypeForMem(TheExpr->getType()));
+ // Check if this is a retainable type.
+ if (TheExpr->getType()->isObjCRetainableType()) {
+ assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
+ "Only scalar can be a ObjC retainable type");
+ llvm::Value *SV = EmitScalarExpr(TheExpr, /*Ignore*/ false);
+ RValue RV = RValue::get(SV);
+ LValue LV = MakeAddrLValue(Addr, TheExpr->getType());
+ EmitStoreThroughLValue(RV, LV);
+ // Check if the object is constant; if not, save it in
+ // RetainableOperands.
+ if (!isa<Constant>(SV))
+ RetainableOperands.push_back(SV);
+ } else {
+ EmitAnyExprToMem(TheExpr, Addr, Qualifiers(), /*isInit*/ true);
+ }
+ } else {
+ Addr = Builder.CreateElementBitCast(Addr, Int32Ty);
+ Builder.CreateStore(
+ Builder.getInt32(Item.getConstValue().getQuantity()), Addr);
+ }
+ Offset += Item.size();
+ }
+
+ // Push a clang.arc.use cleanup for each object in RetainableOperands. The
+ // cleanup will cause the use to appear after the final log call, keeping
+ // the object valid while it's held in the log buffer. Note that if there's
+ // a release cleanup on the object, it will already be active; since
+ // cleanups are emitted in reverse order, the use will occur before the
+ // object is released.
+ if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0)
+ for (llvm::Value *object : RetainableOperands)
+ pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), object);
+
+ return RValue::get(BufAddr.getPointer());
+ }
+
+ case Builtin::BI__builtin_os_log_format_buffer_size: {
+ analyze_os_log::OSLogBufferLayout Layout;
+ analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout);
+ return RValue::get(ConstantInt::get(ConvertType(E->getType()),
+ Layout.size().getQuantity()));
+ }
}
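The two builtins above agree on a simple byte layout; a sketch of what the stores produce (field meanings come from OSLogBufferLayout):

// buffer[0]      summary byte
// buffer[1]      number-of-items byte
// then, for each formatted item:
//   1 descriptor byte
//   1 size byte
//   'size' bytes of payload: either the evaluated argument or, in the
//   constant case, an i32 holding Item.getConstValue()
//
// __builtin_os_log_format_buffer_size returns Layout.size(), the total of
// the above, so callers can allocate the buffer before formatting into it.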
// If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -2385,7 +2701,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
+ return emitLibraryCall(*this, FD, E,
+ cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
// Check that a call to a target specific builtin has the correct target
// features.
@@ -2397,14 +2714,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
- if (const char *Prefix =
- llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
- IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+ StringRef Prefix =
+ llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
+ if (!Prefix.empty()) {
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
// NOTE: we don't need to perform a compatibility flag check here since the
// intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
// MS builtins via ALL_MS_LANGUAGES, so they are filtered earlier.
if (IntrinsicID == Intrinsic::not_intrinsic)
- IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
+ IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
}
if (IntrinsicID != Intrinsic::not_intrinsic) {
@@ -3871,7 +4189,7 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
if (SysReg.empty()) {
const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
- SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
+ SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
}
llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
@@ -4120,19 +4438,21 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+ llvm::Type *PtrTy = llvm::IntegerType::get(
+ getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
- LoadAddr->getType());
+ PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
else {
+ llvm::Type *IntResTy = llvm::IntegerType::get(
+ getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
@@ -4173,7 +4493,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
else {
- StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+ llvm::Type *IntTy = llvm::IntegerType::get(
+ getLLVMContext(),
+ CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
+ StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
}
@@ -4184,6 +4507,41 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
}
+ switch (BuiltinID) {
+ case ARM::BI__iso_volatile_load8:
+ case ARM::BI__iso_volatile_load16:
+ case ARM::BI__iso_volatile_load32:
+ case ARM::BI__iso_volatile_load64: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ LoadSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::LoadInst *Load =
+ Builder.CreateAlignedLoad(Ptr, LoadSize);
+ Load->setVolatile(true);
+ return Load;
+ }
+ case ARM::BI__iso_volatile_store8:
+ case ARM::BI__iso_volatile_store16:
+ case ARM::BI__iso_volatile_store32:
+ case ARM::BI__iso_volatile_store64: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Value = EmitScalarExpr(E->getArg(1));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateAlignedStore(Value, Ptr,
+ StoreSize);
+ Store->setVolatile(true);
+ return Store;
+ }
+ }
+
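For reference, a minimal C++ sketch (not part of the patch) of what the MSVC __iso_volatile_* builtins implement: a plain volatile load or store of an integer matching the pointee width, with no fence emitted. Function names here are illustrative only:

    #include <cstdint>

    // Illustrative semantics of __iso_volatile_load32 / __iso_volatile_store32.
    int32_t iso_volatile_load32(const volatile int32_t *p) {
      return *p;  // volatile load; no barrier
    }
    void iso_volatile_store32(volatile int32_t *p, int32_t v) {
      *p = v;     // volatile store; no barrier
    }
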
if (BuiltinID == ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
@@ -4397,6 +4755,29 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
+ case ARM::BI_BitScanForward:
+ case ARM::BI_BitScanForward64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
+ case ARM::BI_BitScanReverse:
+ case ARM::BI_BitScanReverse64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+ case ARM::BI_InterlockedAnd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+ case ARM::BI_InterlockedExchange64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+ case ARM::BI_InterlockedExchangeAdd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+ case ARM::BI_InterlockedExchangeSub64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+ case ARM::BI_InterlockedOr64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+ case ARM::BI_InterlockedXor64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+ case ARM::BI_InterlockedDecrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+ case ARM::BI_InterlockedIncrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
}
// Get the last argument, which specifies the vector type.
@@ -4889,19 +5270,21 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+ llvm::Type *PtrTy = llvm::IntegerType::get(
+ getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
- LoadAddr->getType());
+ PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
+ llvm::Type *IntResTy = llvm::IntegerType::get(
+ getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
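A small sketch, assuming (as above) that the exclusive-load intrinsic always returns a 64-bit value: the result is narrowed to the in-memory width of the declared type before the final bitcast. The helper name is hypothetical:

    #include <cstdint>

    // Narrow a raw 64-bit exclusive-load result to a 16-bit declared type
    // (mirrors the CreateTruncOrBitCast to IntResTy above).
    int16_t narrowExclusiveResult(uint64_t raw) {
      return static_cast<int16_t>(raw);  // trunc i64 -> i16
    }
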
@@ -4940,7 +5323,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
else {
- StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+ llvm::Type *IntTy = llvm::IntegerType::get(
+ getLLVMContext(),
+ CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
+ StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
}
@@ -5065,9 +5451,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vldrq_p128: {
- llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
+ llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
+ llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
- return Builder.CreateDefaultAlignedLoad(Ptr);
+ return Builder.CreateAlignedLoad(Int128Ty, Ptr,
+ CharUnits::fromQuantity(16));
}
case NEON::BI__builtin_neon_vstrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
@@ -6240,27 +6628,37 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vld1_v:
- case NEON::BI__builtin_neon_vld1q_v:
+ case NEON::BI__builtin_neon_vld1q_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- return Builder.CreateDefaultAlignedLoad(Ops[0]);
+ auto Alignment = CharUnits::fromQuantity(
+ BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
+ return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
+ }
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
case NEON::BI__builtin_neon_vld1_lane_v:
- case NEON::BI__builtin_neon_vld1q_lane_v:
+ case NEON::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
+ auto Alignment = CharUnits::fromQuantity(
+ BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
+ Ops[0] =
+ Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ }
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
+ auto Alignment = CharUnits::fromQuantity(
+ BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
+ Ops[0] =
+ Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
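The alignments hardcoded above follow the register width: 8 bytes for the 64-bit vld1 forms and 16 bytes for the 128-bit vld1q forms. A one-line sketch of that rule (helper name is hypothetical):

    // Alignment used for NEON vld1/vld1q loads: the vector width in bytes.
    constexpr unsigned neonVectorAlign(bool isQuadRegister) {
      return isQuadRegister ? 16 : 8;
    }
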
@@ -6620,6 +7018,26 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
return CGF.Builder.CreateMaskedLoad(Ops[0], Align, MaskVec, Ops[1]);
}
+static Value *EmitX86SubVectorBroadcast(CodeGenFunction &CGF,
+ SmallVectorImpl<Value *> &Ops,
+ llvm::Type *DstTy,
+ unsigned SrcSizeInBits,
+ unsigned Align) {
+ // Load the subvector.
+ Ops[0] = CGF.Builder.CreateAlignedLoad(Ops[0], Align);
+
+ // Create broadcast mask.
+ unsigned NumDstElts = DstTy->getVectorNumElements();
+ unsigned NumSrcElts = SrcSizeInBits / DstTy->getScalarSizeInBits();
+
+ SmallVector<uint32_t, 8> Mask;
+ for (unsigned i = 0; i != NumDstElts; i += NumSrcElts)
+ for (unsigned j = 0; j != NumSrcElts; ++j)
+ Mask.push_back(j);
+
+ return CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], Mask, "subvecbcst");
+}
+
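A standalone sketch of the shuffle mask this helper builds: the indices of the loaded subvector are repeated across the destination width, so NumDstElts = 4 with NumSrcElts = 2 yields {0, 1, 0, 1}:

    #include <vector>

    // Mirror of the mask loop in EmitX86SubVectorBroadcast.
    std::vector<unsigned> subVectorBroadcastMask(unsigned NumDstElts,
                                                 unsigned NumSrcElts) {
      std::vector<unsigned> Mask;
      for (unsigned i = 0; i != NumDstElts; i += NumSrcElts)
        for (unsigned j = 0; j != NumSrcElts; ++j)
          Mask.push_back(j);
      return Mask;  // e.g. (4, 2) -> {0, 1, 0, 1}
    }
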
static Value *EmitX86Select(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
@@ -6676,6 +7094,18 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
std::max(NumElts, 8U)));
}
+static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
+ ArrayRef<Value *> Ops) {
+ Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
+ Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
+
+ if (Ops.size() == 2)
+ return Res;
+
+ assert(Ops.size() == 4);
+ return EmitX86Select(CGF, Ops[3], Res, Ops[2]);
+}
+
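Scalar sketch of the compare+select lowering this helper performs; in the 4-operand (AVX-512 masked) form, Ops[3] is the write mask and Ops[2] the passthrough source fed to EmitX86Select:

    // pmaxs* lowers to a signed greater-than compare plus select;
    // pminu* to an unsigned less-than compare plus select.
    int smax(int a, int b) { return a > b ? a : b; }
    unsigned umin(unsigned a, unsigned b) { return a < b ? a : b; }
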
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == X86::BI__builtin_ms_va_start ||
@@ -6860,6 +7290,25 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
+ case X86::BI_mm_clflush: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
+ Ops[0]);
+ }
+ case X86::BI_mm_lfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
+ }
+ case X86::BI_mm_mfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
+ }
+ case X86::BI_mm_sfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
+ }
+ case X86::BI_mm_pause: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
+ }
+ case X86::BI__rdtsc: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
+ }
case X86::BI__builtin_ia32_undef128:
case X86::BI__builtin_ia32_undef256:
case X86::BI__builtin_ia32_undef512:
@@ -6872,12 +7321,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_ext_v2si:
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
+ case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
}
+ case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
@@ -6944,6 +7395,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storeups512_mask:
return EmitX86MaskedStore(*this, Ops, 1);
+ case X86::BI__builtin_ia32_storess128_mask:
+ case X86::BI__builtin_ia32_storesd128_mask: {
+ return EmitX86MaskedStore(*this, Ops, 16);
+ }
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
case X86::BI__builtin_ia32_storeaps128_mask:
@@ -6980,6 +7435,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loaddqudi512_mask:
return EmitX86MaskedLoad(*this, Ops, 1);
+ case X86::BI__builtin_ia32_loadss128_mask:
+ case X86::BI__builtin_ia32_loadsd128_mask:
+ return EmitX86MaskedLoad(*this, Ops, 16);
+
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
case X86::BI__builtin_ia32_loadaps512_mask:
@@ -6996,6 +7455,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
return EmitX86MaskedLoad(*this, Ops, Align);
}
+
+ case X86::BI__builtin_ia32_vbroadcastf128_pd256:
+ case X86::BI__builtin_ia32_vbroadcastf128_ps256: {
+ llvm::Type *DstTy = ConvertType(E->getType());
+ return EmitX86SubVectorBroadcast(*this, Ops, DstTy, 128, 1);
+ }
+
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
@@ -7015,8 +7481,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr128_mask:
- case X86::BI__builtin_ia32_palignr256_mask:
case X86::BI__builtin_ia32_palignr512_mask: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
@@ -7059,36 +7523,26 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_movnti:
- case X86::BI__builtin_ia32_movnti64: {
- llvm::MDNode *Node = llvm::MDNode::get(
- getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
-
- // Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()),
- "cast");
- StoreInst *SI = Builder.CreateDefaultAlignedStore(Ops[1], BC);
- SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
-
- // No alignment for scalar intrinsic store.
- SI->setAlignment(1);
- return SI;
- }
+ case X86::BI__builtin_ia32_movnti64:
case X86::BI__builtin_ia32_movntsd:
case X86::BI__builtin_ia32_movntss: {
llvm::MDNode *Node = llvm::MDNode::get(
getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+ Value *Ptr = Ops[0];
+ Value *Src = Ops[1];
+
// Extract the 0'th element of the source vector.
- Value *Scl = Builder.CreateExtractElement(Ops[1], (uint64_t)0, "extract");
+ if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
+ BuiltinID == X86::BI__builtin_ia32_movntss)
+ Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
// Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Scl->getType()),
- "cast");
+ Value *BC = Builder.CreateBitCast(
+ Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
// Unaligned nontemporal store of the scalar value.
- StoreInst *SI = Builder.CreateDefaultAlignedStore(Scl, BC);
+ StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
SI->setAlignment(1);
return SI;
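These builtins now emit an ordinary store tagged with !nontemporal metadata and alignment 1. A user-level equivalent using the SSE2 streaming-store intrinsic (x86-only, shown for illustration):

    #include <emmintrin.h>

    // Cache-bypassing scalar store, the same effect movnti produces.
    void streamStore(int *p, int v) {
      _mm_stream_si32(p, v);
    }
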
@@ -7182,43 +7636,58 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[1]);
}
- // TODO: Handle 64/512-bit vector widths of min/max.
case X86::BI__builtin_ia32_pmaxsb128:
case X86::BI__builtin_ia32_pmaxsw128:
case X86::BI__builtin_ia32_pmaxsd128:
+ case X86::BI__builtin_ia32_pmaxsq128_mask:
case X86::BI__builtin_ia32_pmaxsb256:
case X86::BI__builtin_ia32_pmaxsw256:
- case X86::BI__builtin_ia32_pmaxsd256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pmaxsd256:
+ case X86::BI__builtin_ia32_pmaxsq256_mask:
+ case X86::BI__builtin_ia32_pmaxsb512_mask:
+ case X86::BI__builtin_ia32_pmaxsw512_mask:
+ case X86::BI__builtin_ia32_pmaxsd512_mask:
+ case X86::BI__builtin_ia32_pmaxsq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
case X86::BI__builtin_ia32_pmaxub128:
case X86::BI__builtin_ia32_pmaxuw128:
case X86::BI__builtin_ia32_pmaxud128:
+ case X86::BI__builtin_ia32_pmaxuq128_mask:
case X86::BI__builtin_ia32_pmaxub256:
case X86::BI__builtin_ia32_pmaxuw256:
- case X86::BI__builtin_ia32_pmaxud256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pmaxud256:
+ case X86::BI__builtin_ia32_pmaxuq256_mask:
+ case X86::BI__builtin_ia32_pmaxub512_mask:
+ case X86::BI__builtin_ia32_pmaxuw512_mask:
+ case X86::BI__builtin_ia32_pmaxud512_mask:
+ case X86::BI__builtin_ia32_pmaxuq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
case X86::BI__builtin_ia32_pminsb128:
case X86::BI__builtin_ia32_pminsw128:
case X86::BI__builtin_ia32_pminsd128:
+ case X86::BI__builtin_ia32_pminsq128_mask:
case X86::BI__builtin_ia32_pminsb256:
case X86::BI__builtin_ia32_pminsw256:
- case X86::BI__builtin_ia32_pminsd256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SLT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pminsd256:
+ case X86::BI__builtin_ia32_pminsq256_mask:
+ case X86::BI__builtin_ia32_pminsb512_mask:
+ case X86::BI__builtin_ia32_pminsw512_mask:
+ case X86::BI__builtin_ia32_pminsd512_mask:
+ case X86::BI__builtin_ia32_pminsq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
case X86::BI__builtin_ia32_pminub128:
case X86::BI__builtin_ia32_pminuw128:
case X86::BI__builtin_ia32_pminud128:
+ case X86::BI__builtin_ia32_pminuq128_mask:
case X86::BI__builtin_ia32_pminub256:
case X86::BI__builtin_ia32_pminuw256:
- case X86::BI__builtin_ia32_pminud256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pminud256:
+ case X86::BI__builtin_ia32_pminuq256_mask:
+ case X86::BI__builtin_ia32_pminub512_mask:
+ case X86::BI__builtin_ia32_pminuw512_mask:
+ case X86::BI__builtin_ia32_pminud512_mask:
+ case X86::BI__builtin_ia32_pminuq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
@@ -7363,6 +7832,87 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
case X86::BI__builtin_ia32_cmpordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
+
+ case X86::BI__emul:
+ case X86::BI__emulu: {
+ llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
+ bool isSigned = (BuiltinID == X86::BI__emul);
+ Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
+ Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
+ return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
+ }
+ case X86::BI__mulh:
+ case X86::BI__umulh:
+ case X86::BI_mul128:
+ case X86::BI_umul128: {
+ llvm::Type *ResType = ConvertType(E->getType());
+ llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
+
+ bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
+ Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
+ Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
+
+ Value *MulResult, *HigherBits;
+ if (IsSigned) {
+ MulResult = Builder.CreateNSWMul(LHS, RHS);
+ HigherBits = Builder.CreateAShr(MulResult, 64);
+ } else {
+ MulResult = Builder.CreateNUWMul(LHS, RHS);
+ HigherBits = Builder.CreateLShr(MulResult, 64);
+ }
+ HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
+
+ if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
+ return HigherBits;
+
+ Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
+ Builder.CreateStore(HigherBits, HighBitsAddress);
+ return Builder.CreateIntCast(MulResult, ResType, IsSigned);
+ }
+
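A sketch of the widening-multiply semantics implemented above, assuming the __int128 compiler extension (the patch itself widens through a 128-bit IR type). Function names are illustrative:

    #include <cstdint>

    int64_t emul(int32_t a, int32_t b) {      // __emul: full 64-bit product
      return int64_t(a) * int64_t(b);
    }
    uint64_t umulh(uint64_t a, uint64_t b) {  // __umulh: high 64 bits
      return uint64_t((unsigned __int128)a * b >> 64);
    }
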
+ case X86::BI__faststorefence: {
+ return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
+ llvm::CrossThread);
+ }
+ case X86::BI_ReadWriteBarrier:
+ case X86::BI_ReadBarrier:
+ case X86::BI_WriteBarrier: {
+ return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
+ llvm::SingleThread);
+ }
+ case X86::BI_BitScanForward:
+ case X86::BI_BitScanForward64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
+ case X86::BI_BitScanReverse:
+ case X86::BI_BitScanReverse64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+ case X86::BI_InterlockedAnd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+ case X86::BI_InterlockedExchange64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+ case X86::BI_InterlockedExchangeAdd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+ case X86::BI_InterlockedExchangeSub64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+ case X86::BI_InterlockedOr64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+ case X86::BI_InterlockedXor64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+ case X86::BI_InterlockedDecrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+ case X86::BI_InterlockedIncrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+
+ case X86::BI_AddressOfReturnAddress: {
+ Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
+ return Builder.CreateCall(F);
+ }
+ case X86::BI__stosb: {
+    // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
+    // instruction, but it will create a memset that won't be optimized away.
+ return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true);
+ }
}
}
@@ -7384,7 +7934,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_ppc_get_timebase:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
- // vec_ld, vec_lvsl, vec_lvsr
+ // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
case PPC::BI__builtin_altivec_lvx:
case PPC::BI__builtin_altivec_lvxl:
case PPC::BI__builtin_altivec_lvebx:
@@ -7394,11 +7944,19 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvsr:
case PPC::BI__builtin_vsx_lxvd2x:
case PPC::BI__builtin_vsx_lxvw4x:
+ case PPC::BI__builtin_vsx_lxvd2x_be:
+ case PPC::BI__builtin_vsx_lxvw4x_be:
+ case PPC::BI__builtin_vsx_lxvl:
+ case PPC::BI__builtin_vsx_lxvll:
{
- Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
-
- Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
- Ops.pop_back();
+    if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
+        BuiltinID == PPC::BI__builtin_vsx_lxvll) {
+      Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
+    } else {
+      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+      Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+      Ops.pop_back();
+    }
switch (BuiltinID) {
default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
@@ -7429,12 +7987,24 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_lxvw4x:
ID = Intrinsic::ppc_vsx_lxvw4x;
break;
+ case PPC::BI__builtin_vsx_lxvd2x_be:
+ ID = Intrinsic::ppc_vsx_lxvd2x_be;
+ break;
+ case PPC::BI__builtin_vsx_lxvw4x_be:
+ ID = Intrinsic::ppc_vsx_lxvw4x_be;
+ break;
+ case PPC::BI__builtin_vsx_lxvl:
+ ID = Intrinsic::ppc_vsx_lxvl;
+ break;
+ case PPC::BI__builtin_vsx_lxvll:
+ ID = Intrinsic::ppc_vsx_lxvll;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
- // vec_st
+ // vec_st, vec_xst_be
case PPC::BI__builtin_altivec_stvx:
case PPC::BI__builtin_altivec_stvxl:
case PPC::BI__builtin_altivec_stvebx:
@@ -7442,10 +8012,19 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
case PPC::BI__builtin_vsx_stxvd2x:
case PPC::BI__builtin_vsx_stxvw4x:
+ case PPC::BI__builtin_vsx_stxvd2x_be:
+ case PPC::BI__builtin_vsx_stxvw4x_be:
+ case PPC::BI__builtin_vsx_stxvl:
+ case PPC::BI__builtin_vsx_stxvll:
{
- Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
- Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
- Ops.pop_back();
+    if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
+        BuiltinID == PPC::BI__builtin_vsx_stxvll) {
+      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+    } else {
+      Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+      Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+      Ops.pop_back();
+    }
switch (BuiltinID) {
default: llvm_unreachable("Unsupported st intrinsic!");
@@ -7470,6 +8049,18 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_stxvw4x:
ID = Intrinsic::ppc_vsx_stxvw4x;
break;
+ case PPC::BI__builtin_vsx_stxvd2x_be:
+ ID = Intrinsic::ppc_vsx_stxvd2x_be;
+ break;
+ case PPC::BI__builtin_vsx_stxvw4x_be:
+ ID = Intrinsic::ppc_vsx_stxvw4x_be;
+ break;
+ case PPC::BI__builtin_vsx_stxvl:
+ ID = Intrinsic::ppc_vsx_stxvl;
+ break;
+ case PPC::BI__builtin_vsx_stxvll:
+ ID = Intrinsic::ppc_vsx_stxvll;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
@@ -7494,6 +8085,25 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
+ case PPC::BI__builtin_altivec_vctzb:
+ case PPC::BI__builtin_altivec_vctzh:
+ case PPC::BI__builtin_altivec_vctzw:
+ case PPC::BI__builtin_altivec_vctzd: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
+ return Builder.CreateCall(F, {X, Undef});
+ }
+ case PPC::BI__builtin_altivec_vpopcntb:
+ case PPC::BI__builtin_altivec_vpopcnth:
+ case PPC::BI__builtin_altivec_vpopcntw:
+ case PPC::BI__builtin_altivec_vpopcntd: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
+ return Builder.CreateCall(F, X);
+ }
// Copy sign
case PPC::BI__builtin_vsx_xvcpsgnsp:
case PPC::BI__builtin_vsx_xvcpsgndp: {
@@ -7625,45 +8235,73 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
}
+
+ case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
+ return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
case AMDGPU::BI__builtin_amdgcn_div_fixup:
case AMDGPU::BI__builtin_amdgcn_div_fixupf:
+ case AMDGPU::BI__builtin_amdgcn_div_fixuph:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
case AMDGPU::BI__builtin_amdgcn_trig_preop:
case AMDGPU::BI__builtin_amdgcn_trig_preopf:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
case AMDGPU::BI__builtin_amdgcn_rcp:
case AMDGPU::BI__builtin_amdgcn_rcpf:
+ case AMDGPU::BI__builtin_amdgcn_rcph:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
+ case AMDGPU::BI__builtin_amdgcn_rsqh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
case AMDGPU::BI__builtin_amdgcn_sinf:
+ case AMDGPU::BI__builtin_amdgcn_sinh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
case AMDGPU::BI__builtin_amdgcn_cosf:
+ case AMDGPU::BI__builtin_amdgcn_cosh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
case AMDGPU::BI__builtin_amdgcn_ldexpf:
+ case AMDGPU::BI__builtin_amdgcn_ldexph:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
case AMDGPU::BI__builtin_amdgcn_frexp_mant:
- case AMDGPU::BI__builtin_amdgcn_frexp_mantf: {
+ case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
+ case AMDGPU::BI__builtin_amdgcn_frexp_manth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
- }
case AMDGPU::BI__builtin_amdgcn_frexp_exp:
case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_exp);
+ Value *Src0 = EmitScalarExpr(E->getArg(0));
+ Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
+ { Builder.getInt32Ty(), Src0->getType() });
+ return Builder.CreateCall(F, Src0);
+ }
+ case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
+ Value *Src0 = EmitScalarExpr(E->getArg(0));
+ Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
+ { Builder.getInt16Ty(), Src0->getType() });
+ return Builder.CreateCall(F, Src0);
}
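frexp_exp is the exponent counterpart of frexp_mant: together they decompose x as mant * 2^exp with mant in [0.5, 1). A sketch of the same decomposition using the standard library:

    #include <cmath>

    // Return only the binary exponent, as __builtin_amdgcn_frexp_exp does.
    int frexpExponent(float x) {
      int exp;
      (void)std::frexp(x, &exp);
      return exp;
    }
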
case AMDGPU::BI__builtin_amdgcn_fract:
case AMDGPU::BI__builtin_amdgcn_fractf:
+ case AMDGPU::BI__builtin_amdgcn_fracth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
case AMDGPU::BI__builtin_amdgcn_lerp:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
+ case AMDGPU::BI__builtin_amdgcn_uicmp:
+ case AMDGPU::BI__builtin_amdgcn_uicmpl:
+ case AMDGPU::BI__builtin_amdgcn_sicmp:
+ case AMDGPU::BI__builtin_amdgcn_sicmpl:
+ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_icmp);
+ case AMDGPU::BI__builtin_amdgcn_fcmp:
+ case AMDGPU::BI__builtin_amdgcn_fcmpf:
+ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fcmp);
case AMDGPU::BI__builtin_amdgcn_class:
case AMDGPU::BI__builtin_amdgcn_classf:
+ case AMDGPU::BI__builtin_amdgcn_classh:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
case AMDGPU::BI__builtin_amdgcn_read_exec: {
@@ -7951,7 +8589,13 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
Ptr->getType()}),
{Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
};
-
+ auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
+ Ptr->getType()}),
+ {Ptr, EmitScalarExpr(E->getArg(1))});
+ };
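Each scoped atomic is a fetch-and-op returning the old value; the _cta/_sys suffix only widens the visibility scope (thread block vs. whole system), which has no direct C++ analogue. A semantic sketch using the Clang/GCC atomic builtin (the memory order shown is illustrative):

    // What __nvvm_atom_cta_add_gen_i computes, scope aside.
    int atomAdd(int *p, int v) {
      return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
    }
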
switch (BuiltinID) {
case NVPTX::BI__nvvm_atom_add_gen_i:
case NVPTX::BI__nvvm_atom_add_gen_l:
@@ -8070,6 +8714,109 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__nvvm_ldg_d:
case NVPTX::BI__nvvm_ldg_d2:
return MakeLdg(Intrinsic::nvvm_ldg_global_f);
+
+ case NVPTX::BI__nvvm_atom_cta_add_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_add_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_add_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_add_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_add_gen_f:
+ case NVPTX::BI__nvvm_atom_cta_add_gen_d:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
+ case NVPTX::BI__nvvm_atom_sys_add_gen_f:
+ case NVPTX::BI__nvvm_atom_sys_add_gen_d:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
+ case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_max_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_max_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_min_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_min_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_and_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_and_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_and_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_and_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_or_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_or_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_or_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_or_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(
+ Intrinsic::nvvm_atomic_cas_gen_i_cta,
+ {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
+ }
+ case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(
+ Intrinsic::nvvm_atomic_cas_gen_i_sys,
+ {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
+ }
default:
return nullptr;
}
diff --git a/lib/CodeGen/CGCUDABuiltin.cpp b/lib/CodeGen/CGCUDABuiltin.cpp
index ea3b888635c3..44dd003757ad 100644
--- a/lib/CodeGen/CGCUDABuiltin.cpp
+++ b/lib/CodeGen/CGCUDABuiltin.cpp
@@ -99,6 +99,12 @@ CodeGenFunction::EmitCUDADevicePrintfCallExpr(const CallExpr *E,
llvm::SmallVector<llvm::Type *, 8> ArgTypes;
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
ArgTypes.push_back(Args[I].RV.getScalarVal()->getType());
+
+ // Using llvm::StructType is correct only because printf doesn't accept
+ // aggregates. If we had to handle aggregates here, we'd have to manually
+ // compute the offsets within the alloca -- we wouldn't be able to assume
+ // that the alignment of the llvm type was the same as the alignment of the
+ // clang type.
llvm::Type *AllocaTy = llvm::StructType::create(ArgTypes, "printf_args");
llvm::Value *Alloca = CreateTempAlloca(AllocaTy);
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 6a04d4eea784..83febcb4af8c 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -15,6 +15,7 @@
#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/Decl.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
@@ -29,7 +30,8 @@ namespace {
class CGNVCUDARuntime : public CGCUDARuntime {
private:
- llvm::Type *IntTy, *SizeTy, *VoidTy;
+ llvm::IntegerType *IntTy, *SizeTy;
+ llvm::Type *VoidTy;
llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;
/// Convenience reference to LLVM Context
@@ -55,10 +57,18 @@ private:
/// where the C code specifies const char*.
llvm::Constant *makeConstantString(const std::string &Str,
const std::string &Name = "",
+ const std::string &SectionName = "",
unsigned Alignment = 0) {
llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
llvm::ConstantInt::get(SizeTy, 0)};
auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>(ConstStr.getPointer());
+ if (!SectionName.empty())
+ GV->setSection(SectionName);
+ if (Alignment)
+ GV->setAlignment(Alignment);
+
return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
ConstStr.getPointer(), Zeros);
}
@@ -87,9 +97,9 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
- IntTy = Types.ConvertType(Ctx.IntTy);
- SizeTy = Types.ConvertType(Ctx.getSizeType());
- VoidTy = llvm::Type::getVoidTy(Context);
+ IntTy = CGM.IntTy;
+ SizeTy = CGM.SizeTy;
+ VoidTy = CGM.VoidTy;
CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
@@ -118,37 +128,28 @@ void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
FunctionArgList &Args) {
- // Build the argument value list and the argument stack struct type.
- SmallVector<llvm::Value *, 16> ArgValues;
- std::vector<llvm::Type *> ArgTypes;
- for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
- I != E; ++I) {
- llvm::Value *V = CGF.GetAddrOfLocalVar(*I).getPointer();
- ArgValues.push_back(V);
- assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
- ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
- }
- llvm::StructType *ArgStackTy = llvm::StructType::get(Context, ArgTypes);
-
- llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
-
- // Emit the calls to cudaSetupArgument
+ // Emit a call to cudaSetupArgument for each arg in Args.
llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
- for (unsigned I = 0, E = Args.size(); I != E; ++I) {
- llvm::Value *Args[3];
- llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
- Args[0] = CGF.Builder.CreatePointerCast(ArgValues[I], VoidPtrTy);
- Args[1] = CGF.Builder.CreateIntCast(
- llvm::ConstantExpr::getSizeOf(ArgTypes[I]),
- SizeTy, false);
- Args[2] = CGF.Builder.CreateIntCast(
- llvm::ConstantExpr::getOffsetOf(ArgStackTy, I),
- SizeTy, false);
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
+ CharUnits Offset = CharUnits::Zero();
+ for (const VarDecl *A : Args) {
+ CharUnits TyWidth, TyAlign;
+ std::tie(TyWidth, TyAlign) =
+ CGM.getContext().getTypeInfoInChars(A->getType());
+ Offset = Offset.alignTo(TyAlign);
+ llvm::Value *Args[] = {
+ CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
+ VoidPtrTy),
+ llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
+ llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
+ };
llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
+ llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
CGF.EmitBlock(NextBlock);
+ Offset += TyWidth;
}
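The loop above computes each argument's offset in the kernel parameter buffer by aligning the running offset up to the argument's alignment and then advancing by its size. A sketch of that arithmetic:

    #include <cstddef>

    // Mirrors Offset = Offset.alignTo(TyAlign); ... Offset += TyWidth;
    size_t alignTo(size_t offset, size_t align) {
      return (offset + align - 1) / align * align;
    }
    // e.g. arguments (int, double) land at offsets 0 and 8 on a typical
    // 64-bit ABI.
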
// Emit the call to cudaLaunch
@@ -290,18 +291,29 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
continue;
}
- // Create initialized wrapper structure that points to the loaded GPU binary
- llvm::Constant *Values[] = {
- llvm::ConstantInt::get(IntTy, 0x466243b1), // Fatbin wrapper magic.
- llvm::ConstantInt::get(IntTy, 1), // Fatbin version.
- makeConstantString(GpuBinaryOrErr.get()->getBuffer(), "", 16), // Data.
- llvm::ConstantPointerNull::get(VoidPtrTy)}; // Unused in fatbin v1.
- llvm::GlobalVariable *FatbinWrapper = new llvm::GlobalVariable(
- TheModule, FatbinWrapperTy, true, llvm::GlobalValue::InternalLinkage,
- llvm::ConstantStruct::get(FatbinWrapperTy, Values),
- "__cuda_fatbin_wrapper");
+ const char *FatbinConstantName =
+ CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
// NVIDIA's cuobjdump looks for fatbins in this section.
- FatbinWrapper->setSection(".nvFatBinSegment");
+ const char *FatbinSectionName =
+ CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";
+
+ // Create initialized wrapper structure that points to the loaded GPU binary
+ ConstantInitBuilder Builder(CGM);
+ auto Values = Builder.beginStruct(FatbinWrapperTy);
+ // Fatbin wrapper magic.
+ Values.addInt(IntTy, 0x466243b1);
+ // Fatbin version.
+ Values.addInt(IntTy, 1);
+ // Data.
+ Values.add(makeConstantString(GpuBinaryOrErr.get()->getBuffer(),
+ "", FatbinConstantName, 8));
+ // Unused in fatbin v1.
+ Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
+ llvm::GlobalVariable *FatbinWrapper =
+ Values.finishAndCreateGlobal("__cuda_fatbin_wrapper",
+ CGM.getPointerAlign(),
+ /*constant*/ true);
+ FatbinWrapper->setSection(FatbinSectionName);
// GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
index 014a5dbd46d6..1936f9f13692 100644
--- a/lib/CodeGen/CGCUDARuntime.cpp
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -36,16 +36,7 @@ RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
eval.begin(CGF);
CGF.EmitBlock(ConfigOKBlock);
-
- const Decl *TargetDecl = nullptr;
- if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
- TargetDecl = DRE->getDecl();
- }
- }
-
- llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
- CGF.EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue, TargetDecl);
+ CGF.EmitSimpleCallExpr(E, ReturnValue);
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 40f1bc426ff7..59010f4407c2 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -134,6 +134,11 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
llvm::GlobalValue::LinkageTypes TargetLinkage =
getFunctionLinkage(TargetDecl);
+ // available_externally definitions aren't real definitions, so we cannot
+ // create an alias to one.
+ if (TargetLinkage == llvm::GlobalValue::AvailableExternallyLinkage)
+ return true;
+
// Check if we have it already.
StringRef MangledName = getMangledName(AliasDecl);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
@@ -156,14 +161,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
// Instead of creating as alias to a linkonce_odr, replace all of the uses
// of the aliasee.
- if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
- (TargetLinkage != llvm::GlobalValue::AvailableExternallyLinkage ||
- !TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
- // FIXME: An extern template instantiation will create functions with
- // linkage "AvailableExternally". In libc++, some classes also define
- // members with attribute "AlwaysInline" and expect no reference to
- // be generated. It is desirable to reenable this optimisation after
- // corresponding LLVM changes.
+ if (llvm::GlobalValue::isDiscardableIfUnused(Linkage)) {
addReplacement(MangledName, Aliasee);
return false;
}
@@ -220,7 +218,7 @@ llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
getTypes().arrangeCXXStructorDeclaration(MD, Type);
auto *Fn = cast<llvm::Function>(
getAddrOfCXXStructor(MD, Type, &FnInfo, /*FnType=*/nullptr,
- /*DontDefer=*/true, /*IsForDefinition=*/true));
+ /*DontDefer=*/true, ForDefinition));
GlobalDecl GD;
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
@@ -241,7 +239,8 @@ llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
- llvm::FunctionType *FnType, bool DontDefer, bool IsForDefinition) {
+ llvm::FunctionType *FnType, bool DontDefer,
+ ForDefinition_t IsForDefinition) {
GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
GD = GlobalDecl(CD, toCXXCtorType(Type));
@@ -260,10 +259,10 @@ llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
/*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeSet(), IsForDefinition);
}
-static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
- GlobalDecl GD,
- llvm::Type *Ty,
- const CXXRecordDecl *RD) {
+static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ llvm::Type *Ty,
+ const CXXRecordDecl *RD) {
assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
"No kext in Microsoft ABI");
GD = GD.getCanonicalDecl();
@@ -273,22 +272,26 @@ static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
VTable = CGF.Builder.CreateBitCast(VTable, Ty);
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
- uint64_t AddressPoint =
- CGM.getItaniumVTableContext().getVTableLayout(RD)
- .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
- VTableIndex += AddressPoint;
+ const VTableLayout &VTLayout = CGM.getItaniumVTableContext().getVTableLayout(RD);
+ VTableLayout::AddressPointLocation AddressPoint =
+ VTLayout.getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += VTLayout.getVTableOffset(AddressPoint.VTableIndex) +
+ AddressPoint.AddressPointIndex;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
+ llvm::Value *VFunc =
+ CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
+ CGCallee Callee(GD.getDecl(), VFunc);
+ return Callee;
}
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
/// indirect call to virtual functions. It makes the call through indexing
/// into the vtable.
-llvm::Value *
+CGCallee
CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual,
- llvm::Type *Ty) {
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty) {
assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
"BuildAppleKextVirtualCall - bad Qual kind");
@@ -306,21 +309,15 @@ CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
/// BuildVirtualCall - This routine makes indirect vtable call for
/// call to virtual destructors. It returns 0 if it could not do it.
-llvm::Value *
+CGCallee
CodeGenFunction::BuildAppleKextVirtualDestructorCall(
const CXXDestructorDecl *DD,
CXXDtorType Type,
const CXXRecordDecl *RD) {
- const auto *MD = cast<CXXMethodDecl>(DD);
- // FIXME. Dtor_Base dtor is always direct!!
- // It need be somehow inline expanded into the caller.
- // -O does that. But need to support -O0 as well.
- if (MD->isVirtual() && Type != Dtor_Base) {
- // Compute the function type we're calling.
- const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
- DD, StructorType::Complete);
- llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
- return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
- }
- return nullptr;
+ assert(DD->isVirtual() && Type != Dtor_Base);
+ // Compute the function type we're calling.
+ const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
+ DD, StructorType::Complete);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
+ return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index e4da447eddc7..df75a7d7ffde 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -73,7 +73,7 @@ CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
}
-llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
+CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address This,
llvm::Value *&ThisPtrForCall,
llvm::Value *MemPtr, const MemberPointerType *MPT) {
@@ -86,7 +86,8 @@ llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
- return llvm::Constant::getNullValue(FTy->getPointerTo());
+ llvm::Constant *FnPtr = llvm::Constant::getNullValue(FTy->getPointerTo());
+ return CGCallee::forDirect(FnPtr, FPT);
}
llvm::Value *
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 9e10ec068e09..d53fd4cb63b2 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -35,6 +35,7 @@ class FieldDecl;
class MangleContext;
namespace CodeGen {
+class CGCallee;
class CodeGenFunction;
class CodeGenModule;
struct CatchTypeInfo;
@@ -154,7 +155,7 @@ public:
/// Load a member function from an object and a member function
/// pointer. Apply the this-adjustment and set 'This' to the
/// adjusted value.
- virtual llvm::Value *EmitLoadOfMemberFunctionPointer(
+ virtual CGCallee EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address This,
llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
const MemberPointerType *MPT);
@@ -403,11 +404,11 @@ public:
CharUnits VPtrOffset) = 0;
/// Build a virtual function pointer in the ABI-specific way.
- virtual llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF,
- GlobalDecl GD,
- Address This,
- llvm::Type *Ty,
- SourceLocation Loc) = 0;
+ virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ Address This,
+ llvm::Type *Ty,
+ SourceLocation Loc) = 0;
/// Emit the ABI-specific virtual destructor call.
virtual llvm::Value *
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 242b5962070a..9b96a59aec38 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -29,6 +29,7 @@
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
@@ -47,6 +48,7 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
default: return llvm::CallingConv::C;
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
@@ -172,6 +174,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
if (D->hasAttr<FastCallAttr>())
return CC_X86FastCall;
+ if (D->hasAttr<RegCallAttr>())
+ return CC_X86RegCall;
+
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
@@ -1647,6 +1652,8 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
if (TargetDecl->hasAttr<NoDuplicateAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
+ if (TargetDecl->hasAttr<ConvergentAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::Convergent);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
@@ -1676,6 +1683,14 @@ void CodeGenModule::ConstructAttributeList(
HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
+ if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
+ Optional<unsigned> NumElemsParam;
+ // alloc_size args are base-1, 0 means not present.
+ if (unsigned N = AllocSize->getNumElemsParam())
+ NumElemsParam = N - 1;
+ FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
+ NumElemsParam);
+ }
}
// OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
@@ -1722,6 +1737,16 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute("less-precise-fpmad",
llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
+
+ if (!CodeGenOpts.FPDenormalMode.empty())
+ FuncAttrs.addAttribute("denormal-fp-math",
+ CodeGenOpts.FPDenormalMode);
+
+ FuncAttrs.addAttribute("no-trapping-math",
+ llvm::toStringRef(CodeGenOpts.NoTrappingMath));
+
+ // TODO: Are these all needed?
+ // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
FuncAttrs.addAttribute("no-infs-fp-math",
llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
FuncAttrs.addAttribute("no-nans-fp-math",
@@ -1734,6 +1759,15 @@ void CodeGenModule::ConstructAttributeList(
llvm::utostr(CodeGenOpts.SSPBufferSize));
FuncAttrs.addAttribute("no-signed-zeros-fp-math",
llvm::toStringRef(CodeGenOpts.NoSignedZeros));
+ FuncAttrs.addAttribute(
+ "correctly-rounded-divide-sqrt-fp-math",
+ llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
+
+ // TODO: Reciprocal estimate codegen options should apply to instructions?
+ std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
+ if (!Recips.empty())
+ FuncAttrs.addAttribute("reciprocal-estimates",
+ llvm::join(Recips.begin(), Recips.end(), ","));
if (CodeGenOpts.StackRealignment)
FuncAttrs.addAttribute("stackrealign");
@@ -1794,6 +1828,9 @@ void CodeGenModule::ConstructAttributeList(
// them). LLVM will remove this attribute where it safely can.
FuncAttrs.addAttribute(llvm::Attribute::Convergent);
+ // Exceptions aren't supported in CUDA device code.
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+
// Respect -fcuda-flush-denormals-to-zero.
if (getLangOpts().CUDADeviceFlushDenormalsToZero)
FuncAttrs.addAttribute("nvptx-f32ftz", "true");
@@ -2299,13 +2336,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
- if (const CXXMethodDecl *MD =
- dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
- if (MD->isVirtual() && Arg == CXXABIThisDecl)
- V = CGM.getCXXABI().
- adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
- }
-
// Because of merging of function types from multiple decls it is
// possible for the type of an argument to not match the corresponding
// type in the function type. Since we are codegening the callee
@@ -2465,7 +2495,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
// result is in a BasicBlock and is therefore an Instruction.
llvm::Instruction *generator = cast<llvm::Instruction>(result);
- SmallVector<llvm::Instruction*,4> insnsToKill;
+ SmallVector<llvm::Instruction *, 4> InstsToKill;
// Look for:
// %generator = bitcast %type1* %generator2 to %type2*
@@ -2478,7 +2508,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
if (generator->getNextNode() != bitcast)
return nullptr;
- insnsToKill.push_back(bitcast);
+ InstsToKill.push_back(bitcast);
}
// Look for:
@@ -2511,27 +2541,26 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
assert(isa<llvm::CallInst>(prev));
assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
- insnsToKill.push_back(prev);
+ InstsToKill.push_back(prev);
}
} else {
return nullptr;
}
result = call->getArgOperand(0);
- insnsToKill.push_back(call);
+ InstsToKill.push_back(call);
// Keep killing bitcasts, for sanity. Note that we no longer care
// about precise ordering as long as there's exactly one use.
while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
if (!bitcast->hasOneUse()) break;
- insnsToKill.push_back(bitcast);
+ InstsToKill.push_back(bitcast);
result = bitcast->getOperand(0);
}
// Delete all the unnecessary instructions, from latest to earliest.
- for (SmallVectorImpl<llvm::Instruction*>::iterator
- i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
- (*i)->eraseFromParent();
+ for (auto *I : InstsToKill)
+ I->eraseFromParent();
// Do the fused retain/autorelease if we were asked to.
if (doRetainAutorelease)
@@ -2841,7 +2870,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
EmitCheckSourceLocation(RetNNAttr->getLocation()),
};
EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
- "nonnull_return", StaticData, None);
+ SanitizerHandler::NonnullReturn, StaticData, None);
}
}
Ret = Builder.CreateRet(RV);
@@ -2863,13 +2892,13 @@ static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
// FIXME: Generate IR in one pass, rather than going back and fixing up these
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *Placeholder =
- llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
- Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
+ llvm::Type *IRPtrTy = IRTy->getPointerTo();
+ llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
// FIXME: When we generate this IR in one pass, we shouldn't need
// this win32-specific alignment hack.
CharUnits Align = CharUnits::fromQuantity(4);
+ Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
return AggValueSlot::forAddr(Address(Placeholder, Align),
Ty.getQualifiers(),
@@ -2891,22 +2920,36 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
"cannot emit delegate call arguments for inalloca arguments!");
+ // GetAddrOfLocalVar returns a pointer-to-pointer for references,
+ // but the argument needs to be the original pointer.
+ if (type->isReferenceType()) {
+ args.add(RValue::get(Builder.CreateLoad(local)), type);
+
+ // In ARC, move out of consumed arguments so that the release cleanup
+ // entered by StartFunction doesn't cause an over-release. This isn't
+ // optimal -O0 code generation, but it should get cleaned up when
+ // optimization is enabled. This also assumes that delegate calls are
+ // performed exactly once for a set of arguments, but that should be safe.
+ } else if (getLangOpts().ObjCAutoRefCount &&
+ param->hasAttr<NSConsumedAttr>() &&
+ type->isObjCRetainableType()) {
+ llvm::Value *ptr = Builder.CreateLoad(local);
+ auto null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
+ Builder.CreateStore(null, local);
+ args.add(RValue::get(ptr), type);
+
// For the most part, we just need to load the alloca, except that
// aggregate r-values are actually pointers to temporaries.
- if (type->isReferenceType())
- args.add(RValue::get(Builder.CreateLoad(local)), type);
- else
+ } else {
args.add(convertTempToRValue(local, type, loc), type);
+ }
}
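
For readers tracing the reference branch above: locals that bind references are materialized as pointer-sized slots, so forwarding the argument means loading the stored pointer, not passing the slot's own address. A minimal standalone sketch of that indirection (hypothetical names, no CodeGen API involved):

    #include <cassert>

    void sketch(int &r) {
      int *slot = &r;           // the value stored in the reference's alloca
      int **local = &slot;      // what GetAddrOfLocalVar conceptually returns
      int *forwarded = *local;  // the Builder.CreateLoad(local) in the branch above
      assert(forwarded == &r);  // the callee receives the original pointer
    }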
static bool isProvablyNull(llvm::Value *addr) {
return isa<llvm::ConstantPointerNull>(addr);
}
-static bool isProvablyNonNull(llvm::Value *addr) {
- return isa<llvm::AllocaInst>(addr);
-}
-
/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
@@ -2919,7 +2962,7 @@ static void emitWriteback(CodeGenFunction &CGF,
// If the argument wasn't provably non-null, we need to null check
// before doing the store.
- bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
+ bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
if (!provablyNonNull) {
llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
contBB = CGF.createBasicBlock("icr.done");
@@ -3059,7 +3102,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
- bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
+ bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
if (provablyNonNull) {
finalArgument = temp.getPointer();
} else {
@@ -3130,7 +3173,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
- assert(!StackBase && !StackCleanup.isValid());
+ assert(!StackBase);
// Save the stack.
llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
@@ -3167,13 +3210,14 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
};
EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
- "nonnull_arg", StaticData, None);
+ SanitizerHandler::NonnullArg, StaticData, None);
}
void CodeGenFunction::EmitCallArgs(
CallArgList &Args, ArrayRef<QualType> ArgTypes,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
- const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
+ const FunctionDecl *CalleeDecl, unsigned ParamsToSkip,
+ EvaluationOrder Order) {
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
@@ -3191,10 +3235,18 @@ void CodeGenFunction::EmitCallArgs(
};
// We *have* to evaluate arguments from right to left in the MS C++ ABI,
- // because arguments are destroyed left to right in the callee.
- if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
- // Insert a stack save if we're going to need any inalloca args.
- bool HasInAllocaArgs = false;
+ // because arguments are destroyed left to right in the callee. As a special
+ // case, there are certain language constructs that require left-to-right
+ // evaluation, and in those cases we consider the evaluation order requirement
+ // to trump the "destruction order is reverse construction order" guarantee.
+ bool LeftToRight =
+ CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
+ ? Order == EvaluationOrder::ForceLeftToRight
+ : Order != EvaluationOrder::ForceRightToLeft;
+
+ // Insert a stack save if we're going to need any inalloca args.
+ bool HasInAllocaArgs = false;
+ if (CGM.getTarget().getCXXABI().isMicrosoft()) {
for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
I != E && !HasInAllocaArgs; ++I)
HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
@@ -3202,30 +3254,24 @@ void CodeGenFunction::EmitCallArgs(
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
Args.allocateArgumentMemory(*this);
}
+ }
- // Evaluate each argument.
- size_t CallArgsStart = Args.size();
- for (int I = ArgTypes.size() - 1; I >= 0; --I) {
- CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
- MaybeEmitImplicitObjectSize(I, *Arg);
- EmitCallArg(Args, *Arg, ArgTypes[I]);
- EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
- CalleeDecl, ParamsToSkip + I);
- }
+ // Evaluate each argument in the appropriate order.
+ size_t CallArgsStart = Args.size();
+ for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
+ unsigned Idx = LeftToRight ? I : E - I - 1;
+ CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
+ if (!LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
+ EmitCallArg(Args, *Arg, ArgTypes[Idx]);
+ EmitNonNullArgCheck(Args.back().RV, ArgTypes[Idx], (*Arg)->getExprLoc(),
+ CalleeDecl, ParamsToSkip + Idx);
+ if (LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
+ }
+ if (!LeftToRight) {
// Un-reverse the arguments we just evaluated so they match up with the LLVM
// IR function.
std::reverse(Args.begin() + CallArgsStart, Args.end());
- return;
- }
-
- for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
- CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
- assert(Arg != ArgRange.end());
- EmitCallArg(Args, *Arg, ArgTypes[I]);
- EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
- CalleeDecl, ParamsToSkip + I);
- MaybeEmitImplicitObjectSize(I, *Arg);
}
}
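
A standalone sketch of the index mapping the unified loop now uses, assuming four arguments; the std::reverse afterwards puts right-to-left-evaluated arguments back into source order so the CallArgList always lines up with the IR signature:

    #include <cstdio>

    int main() {
      const unsigned E = 4;  // pretend ArgTypes.size() == 4
      for (bool LeftToRight : {true, false}) {
        std::printf(LeftToRight ? "LTR:" : "RTL:");
        for (unsigned I = 0; I != E; ++I) {
          unsigned Idx = LeftToRight ? I : E - I - 1;  // same formula as the loop
          std::printf(" %u", Idx);
        }
        std::printf("\n");  // prints "LTR: 0 1 2 3" then "RTL: 3 2 1 0"
      }
    }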
@@ -3267,7 +3313,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
assert(getLangOpts().ObjCAutoRefCount);
- assert(getContext().hasSameType(E->getType(), type));
+ assert(getContext().hasSameUnqualifiedType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
@@ -3505,21 +3551,22 @@ void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
}
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
- llvm::Value *Callee,
+ const CGCallee &Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- CGCalleeInfo CalleeInfo,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ assert(Callee.isOrdinary());
+
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
- llvm::FunctionType *IRFuncTy =
- cast<llvm::FunctionType>(
- cast<llvm::PointerType>(Callee->getType())->getElementType());
+ llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
+
+ // 1. Set up the arguments.
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
@@ -3579,6 +3626,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Address swiftErrorTemp = Address::invalid();
Address swiftErrorArg = Address::invalid();
+ // Translate all of the arguments as necessary to match the IR lowering.
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
unsigned ArgNo = 0;
@@ -3826,6 +3874,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+
+ // If we're using inalloca, set up that argument.
if (ArgMemory.isValid()) {
llvm::Value *Arg = ArgMemory.getPointer();
if (CallInfo.isVariadic()) {
@@ -3833,10 +3884,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// end up with a variadic prototype and an inalloca call site. In such
// cases, we can't do any parameter mismatch checks. Give up and bitcast
// the callee.
- unsigned CalleeAS =
- cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
- Callee = Builder.CreateBitCast(
- Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
+ unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
+ auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
+ CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
} else {
llvm::Type *LastParamTy =
IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
@@ -3860,39 +3910,57 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
- if (!CallArgs.getCleanupsToDeactivate().empty())
- deactivateArgCleanupsBeforeCall(*this, CallArgs);
+ // 2. Prepare the function pointer.
+
+ // If the callee is a bitcast of a non-variadic function to have a
+ // variadic function pointer type, check to see if we can remove the
+ // bitcast. This comes up with unprototyped functions.
+ //
+ // This makes the IR nicer, but more importantly it ensures that we
+ // can inline the function at -O0 if it is marked always_inline.
+ auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
+ llvm::FunctionType *CalleeFT =
+ cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
+ if (!CalleeFT->isVarArg())
+ return Ptr;
+
+ llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
+ if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
+ return Ptr;
+
+ llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
+ if (!OrigFn)
+ return Ptr;
+
+ llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
+
+ // If the original type is variadic, or if any of the component types
+ // disagree, we cannot remove the cast.
+ if (OrigFT->isVarArg() ||
+ OrigFT->getNumParams() != CalleeFT->getNumParams() ||
+ OrigFT->getReturnType() != CalleeFT->getReturnType())
+ return Ptr;
+
+ for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
+ if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
+ return Ptr;
+
+ return OrigFn;
+ };
+ CalleePtr = simplifyVariadicCallee(CalleePtr);
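
Because the lambda's early-outs are phrased as "return Ptr" (meaning: keep the bitcast), its decision can be easy to misread. A hedged restatement on plain data, illustrative only:

    #include <vector>

    struct FnTy {
      std::vector<int> Params;  // stand-ins for llvm::Type*
      int Ret;
      bool IsVarArg;
    };

    // Strip the cast only when the apparent callee type is variadic, the
    // underlying function is not, and every component type agrees.
    bool canStripBitcast(const FnTy &Orig, const FnTy &Callee) {
      if (!Callee.IsVarArg || Orig.IsVarArg)
        return false;
      return Orig.Ret == Callee.Ret && Orig.Params == Callee.Params;
    }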
- // If the callee is a bitcast of a function to a varargs pointer to function
- // type, check to see if we can remove the bitcast. This handles some cases
- // with unprototyped functions.
- if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
- if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
- llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
- llvm::FunctionType *CurFT =
- cast<llvm::FunctionType>(CurPT->getElementType());
- llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
-
- if (CE->getOpcode() == llvm::Instruction::BitCast &&
- ActualFT->getReturnType() == CurFT->getReturnType() &&
- ActualFT->getNumParams() == CurFT->getNumParams() &&
- ActualFT->getNumParams() == IRCallArgs.size() &&
- (CurFT->isVarArg() || !ActualFT->isVarArg())) {
- bool ArgsMatch = true;
- for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
- if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
- ArgsMatch = false;
- break;
- }
+ // 3. Perform the actual call.
- // Strip the cast if we can get away with it. This is a nice cleanup,
- // but also allows us to inline the function at -O0 if it is marked
- // always_inline.
- if (ArgsMatch)
- Callee = CalleeF;
- }
- }
+ // Deactivate any cleanups that we're supposed to do immediately before
+ // the call.
+ if (!CallArgs.getCleanupsToDeactivate().empty())
+ deactivateArgCleanupsBeforeCall(*this, CallArgs);
+ // Assert that the arguments we computed match up. The IR verifier
+ // will catch this, but this is a common enough source of problems
+ // during IRGen changes that it's way better for debugging to catch
+ // it ourselves here.
+#ifndef NDEBUG
assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
// Inalloca argument can have different type.
@@ -3902,75 +3970,106 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (i < IRFuncTy->getNumParams())
assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
}
+#endif
+ // Compute the calling convention and attributes.
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
+ CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
+ Callee.getAbstractInfo(),
AttributeList, CallingConv,
/*AttrOnCallSite=*/true);
llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
AttributeList);
+ // Apply some call-site-specific attributes.
+ // TODO: work this into building the attribute set.
+
+ // Apply always_inline to all calls within flatten functions.
+ // FIXME: should this really take priority over __try, below?
+ if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
+ !(Callee.getAbstractInfo().getCalleeDecl() &&
+ Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::AlwaysInline);
+ }
+
+ // Disable inlining inside SEH __try blocks.
+ if (isSEHTryScope()) {
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoInline);
+ }
+
+ // Decide whether to use a call or an invoke.
bool CannotThrow;
if (currentFunctionUsesSEHTry()) {
- // SEH cares about asynchronous exceptions, everything can "throw."
+ // SEH cares about asynchronous exceptions, so everything can "throw."
CannotThrow = false;
} else if (isCleanupPadScope() &&
EHPersonality::get(*this).isMSVCXXPersonality()) {
// The MSVC++ personality will implicitly terminate the program if an
- // exception is thrown. An unwind edge cannot be reached.
+ // exception is thrown during a cleanup outside of a try/catch.
+ // We don't need to model anything in IR to get this behavior.
CannotThrow = true;
} else {
- // Otherwise, nowunind callsites will never throw.
+ // Otherwise, nounwind call sites will never throw.
CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoUnwind);
}
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
SmallVector<llvm::OperandBundleDef, 1> BundleList;
- getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
+ getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
+ // Emit the actual call/invoke instruction.
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
+ CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
+ CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
BundleList);
EmitBlock(Cont);
}
+ llvm::Instruction *CI = CS.getInstruction();
if (callOrInvoke)
- *callOrInvoke = CS.getInstruction();
-
- if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
- !CS.hasFnAttr(llvm::Attribute::NoInline))
- Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::AlwaysInline);
-
- // Disable inlining inside SEH __try blocks.
- if (isSEHTryScope())
- Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoInline);
+ *callOrInvoke = CI;
+ // Apply the attributes and calling convention.
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+ // Apply various metadata.
+
+ if (!CI->getType()->isVoidTy())
+ CI->setName("call");
+
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
// IPVK_IndirectCallTarget in InstrProfData.inc.
if (!CS.getCalledFunction())
PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
- CS.getInstruction(), Callee);
+ CI, CalleePtr);
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
if (CGM.getLangOpts().ObjCAutoRefCount)
- AddObjCARCExceptionMetadata(CS.getInstruction());
+ AddObjCARCExceptionMetadata(CI);
+
+ // Suppress tail calls if requested.
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
+ if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
+ Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
+ }
+
+ // 4. Finish the call.
// If the call doesn't return, finish the basic block and clear the
- // insertion point; this allows the rest of IRgen to discard
+ // insertion point; this allows the rest of IRGen to discard
// unreachable code.
if (CS.doesNotReturn()) {
if (UnusedReturnSize)
@@ -3989,18 +4088,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return GetUndefRValue(RetTy);
}
- llvm::Instruction *CI = CS.getInstruction();
- if (!CI->getType()->isVoidTy())
- CI->setName("call");
-
// Perform the swifterror writeback.
if (swiftErrorTemp.isValid()) {
llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
Builder.CreateStore(errorResult, swiftErrorArg);
}
- // Emit any writebacks immediately. Arguably this should happen
- // after any return-value munging.
+ // Emit any call-associated writebacks immediately. Arguably this
+ // should happen after any return-value munging.
if (CallArgs.hasWritebacks())
emitWritebacks(*this, CallArgs);
@@ -4008,12 +4103,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// lexical order, so deactivate it and run it manually here.
CallArgs.freeArgumentMemory(*this);
- if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
- if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
- Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
- }
-
+ // Extract the return value.
RValue Ret = [&] {
switch (RetAI.getKind()) {
case ABIArgInfo::CoerceAndExpand: {
@@ -4110,8 +4200,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm_unreachable("Unhandled ABIArgInfo::Kind");
} ();
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
-
+ // Emit the assume_aligned check on the return value.
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 2ebd09b9eb57..031ce831cb37 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -19,7 +19,6 @@
#include "EHScopeStack.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/Type.h"
-#include "llvm/ADT/FoldingSet.h"
#include "llvm/IR/Value.h"
// FIXME: Restructure so we don't have to expose so much stuff.
@@ -42,6 +41,134 @@ namespace clang {
namespace CodeGen {
typedef SmallVector<llvm::AttributeSet, 8> AttributeListType;
+ /// Abstract information about a function or function prototype.
+ class CGCalleeInfo {
+ /// \brief The function prototype of the callee.
+ const FunctionProtoType *CalleeProtoTy;
+ /// \brief The function declaration of the callee.
+ const Decl *CalleeDecl;
+
+ public:
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl(nullptr) {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy, const Decl *calleeDecl)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl(nullptr) {}
+ CGCalleeInfo(const Decl *calleeDecl)
+ : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
+
+ const FunctionProtoType *getCalleeFunctionProtoType() const {
+ return CalleeProtoTy;
+ }
+ const Decl *getCalleeDecl() const { return CalleeDecl; }
+ };
+
+ /// All available information about a concrete callee.
+ class CGCallee {
+ enum class SpecialKind : uintptr_t {
+ Invalid,
+ Builtin,
+ PseudoDestructor,
+
+ Last = PseudoDestructor
+ };
+
+ struct BuiltinInfoStorage {
+ const FunctionDecl *Decl;
+ unsigned ID;
+ };
+ struct PseudoDestructorInfoStorage {
+ const CXXPseudoDestructorExpr *Expr;
+ };
+
+ SpecialKind KindOrFunctionPointer;
+ union {
+ CGCalleeInfo AbstractInfo;
+ BuiltinInfoStorage BuiltinInfo;
+ PseudoDestructorInfoStorage PseudoDestructorInfo;
+ };
+
+ explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}
+
+ CGCallee(const FunctionDecl *builtinDecl, unsigned builtinID)
+ : KindOrFunctionPointer(SpecialKind::Builtin) {
+ BuiltinInfo.Decl = builtinDecl;
+ BuiltinInfo.ID = builtinID;
+ }
+
+ public:
+ CGCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}
+
+ /// Construct a callee. Call this constructor directly when this
+ /// isn't a direct call.
+ CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
+ : KindOrFunctionPointer(SpecialKind(uintptr_t(functionPtr))) {
+ AbstractInfo = abstractInfo;
+ assert(functionPtr && "configuring callee without function pointer");
+ assert(functionPtr->getType()->isPointerTy());
+ assert(functionPtr->getType()->getPointerElementType()->isFunctionTy());
+ }
+
+ static CGCallee forBuiltin(unsigned builtinID,
+ const FunctionDecl *builtinDecl) {
+ CGCallee result(SpecialKind::Builtin);
+ result.BuiltinInfo.Decl = builtinDecl;
+ result.BuiltinInfo.ID = builtinID;
+ return result;
+ }
+
+ static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E) {
+ CGCallee result(SpecialKind::PseudoDestructor);
+ result.PseudoDestructorInfo.Expr = E;
+ return result;
+ }
+
+ static CGCallee forDirect(llvm::Constant *functionPtr,
+ const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
+ return CGCallee(abstractInfo, functionPtr);
+ }
+
+ bool isBuiltin() const {
+ return KindOrFunctionPointer == SpecialKind::Builtin;
+ }
+ const FunctionDecl *getBuiltinDecl() const {
+ assert(isBuiltin());
+ return BuiltinInfo.Decl;
+ }
+ unsigned getBuiltinID() const {
+ assert(isBuiltin());
+ return BuiltinInfo.ID;
+ }
+
+ bool isPseudoDestructor() const {
+ return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
+ }
+ const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
+ assert(isPseudoDestructor());
+ return PseudoDestructorInfo.Expr;
+ }
+
+ bool isOrdinary() const {
+ return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
+ }
+ const CGCalleeInfo &getAbstractInfo() const {
+ assert(isOrdinary());
+ return AbstractInfo;
+ }
+ llvm::Value *getFunctionPointer() const {
+ assert(isOrdinary());
+ return reinterpret_cast<llvm::Value*>(uintptr_t(KindOrFunctionPointer));
+ }
+ llvm::FunctionType *getFunctionType() const {
+ return cast<llvm::FunctionType>(
+ getFunctionPointer()->getType()->getPointerElementType());
+ }
+ void setFunctionPointer(llvm::Value *functionPtr) {
+ assert(isOrdinary());
+ KindOrFunctionPointer = SpecialKind(uintptr_t(functionPtr));
+ }
+ };
+
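The discriminant here is magnitude: KindOrFunctionPointer holds either a small SpecialKind enumerator or the function pointer itself, and isOrdinary() tells them apart by comparing against SpecialKind::Last. A self-contained sketch of the same tagging scheme (hypothetical names; it assumes, as CGCallee does, that no function is ever placed at an address small enough to collide with the enumerators):

    #include <cassert>
    #include <cstdint>

    using FnPtr = void (*)();

    enum class Kind : uintptr_t { Invalid, Builtin, Last = Builtin };

    struct TaggedCallee {
      Kind K = Kind::Invalid;
      bool isOrdinary() const { return uintptr_t(K) > uintptr_t(Kind::Last); }
      void set(FnPtr Fn) { K = Kind(reinterpret_cast<uintptr_t>(Fn)); }
      FnPtr get() const {
        assert(isOrdinary());
        return reinterpret_cast<FnPtr>(uintptr_t(K));
      }
    };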
struct CallArg {
RValue RV;
QualType Ty;
@@ -82,10 +209,19 @@ namespace CodeGen {
push_back(CallArg(rvalue, type, needscopy));
}
+ /// Add all the arguments from another CallArgList to this one. After doing
+ /// this, the old CallArgList retains its list of arguments, but must not
+ /// be used to emit a call.
void addFrom(const CallArgList &other) {
insert(end(), other.begin(), other.end());
Writebacks.insert(Writebacks.end(),
other.Writebacks.begin(), other.Writebacks.end());
+ CleanupsToDeactivate.insert(CleanupsToDeactivate.end(),
+ other.CleanupsToDeactivate.begin(),
+ other.CleanupsToDeactivate.end());
+ assert(!(StackBase && other.StackBase) && "can't merge stackbases");
+ if (!StackBase)
+ StackBase = other.StackBase;
}
void addWriteback(LValue srcLV, Address temporary,
@@ -133,11 +269,6 @@ namespace CodeGen {
/// The stacksave call. It dominates all of the argument evaluation.
llvm::CallInst *StackBase;
-
- /// The iterator pointing to the stack restore cleanup. We manually run and
- /// deactivate this cleanup after the call in the unexceptional case because
- /// it doesn't run in the normal order.
- EHScopeStack::stable_iterator StackCleanup;
};
/// FunctionArgList - Type for representing both the decl and type
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 7ed891f426aa..05d056739524 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -562,105 +562,6 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
isBaseVirtual);
}
-static void EmitAggMemberInitializer(CodeGenFunction &CGF,
- LValue LHS,
- Expr *Init,
- Address ArrayIndexVar,
- QualType T,
- ArrayRef<VarDecl *> ArrayIndexes,
- unsigned Index) {
- if (Index == ArrayIndexes.size()) {
- LValue LV = LHS;
-
- if (ArrayIndexVar.isValid()) {
- // If we have an array index variable, load it and use it as an offset.
- // Then, increment the value.
- llvm::Value *Dest = LHS.getPointer();
- llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
- Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
- llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
- Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
- CGF.Builder.CreateStore(Next, ArrayIndexVar);
-
- // Update the LValue.
- CharUnits EltSize = CGF.getContext().getTypeSizeInChars(T);
- CharUnits Align = LV.getAlignment().alignmentOfArrayElement(EltSize);
- LV.setAddress(Address(Dest, Align));
- }
-
- switch (CGF.getEvaluationKind(T)) {
- case TEK_Scalar:
- CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
- break;
- case TEK_Complex:
- CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
- break;
- case TEK_Aggregate: {
- AggValueSlot Slot =
- AggValueSlot::forLValue(LV,
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
-
- CGF.EmitAggExpr(Init, Slot);
- break;
- }
- }
-
- return;
- }
-
- const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
- assert(Array && "Array initialization without the array type?");
- Address IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
-
- // Initialize this index variable to zero.
- llvm::Value* Zero
- = llvm::Constant::getNullValue(IndexVar.getElementType());
- CGF.Builder.CreateStore(Zero, IndexVar);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
-
- CGF.EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
- // Generate: if (loop-index < number-of-elements) fall to the loop body,
- // otherwise, go to the block after the for-loop.
- uint64_t NumElements = Array->getSize().getZExtValue();
- llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
- llvm::Value *NumElementsPtr =
- llvm::ConstantInt::get(Counter->getType(), NumElements);
- llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
- "isless");
-
- // If the condition is true, execute the body.
- CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- CGF.EmitBlock(ForBody);
- llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
-
- // Inside the loop body recurse to emit the inner loop or, eventually, the
- // constructor call.
- EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
- Array->getElementType(), ArrayIndexes, Index + 1);
-
- CGF.EmitBlock(ContinueBlock);
-
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
- Counter = CGF.Builder.CreateLoad(IndexVar);
- NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
- CGF.Builder.CreateStore(NextVal, IndexVar);
-
- // Finally, branch back up to the condition for the next iteration.
- CGF.EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- CGF.EmitBlock(AfterFor, true);
-}
-
static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) {
auto *CD = dyn_cast<CXXConstructorDecl>(D);
if (!(CD && CD->isCopyOrMoveConstructor()) &&
@@ -744,14 +645,11 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
}
}
- ArrayRef<VarDecl *> ArrayIndexes;
- if (MemberInit->getNumArrayIndices())
- ArrayIndexes = MemberInit->getArrayIndices();
- CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit());
}
void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
- Expr *Init, ArrayRef<VarDecl *> ArrayIndexes) {
+ Expr *Init) {
QualType FieldType = Field->getType();
switch (getEvaluationKind(FieldType)) {
case TEK_Scalar:
@@ -766,30 +664,13 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
break;
case TEK_Aggregate: {
- Address ArrayIndexVar = Address::invalid();
- if (ArrayIndexes.size()) {
- // The LHS is a pointer to the first object we'll be constructing, as
- // a flat array.
- QualType BaseElementTy = getContext().getBaseElementType(FieldType);
- llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- Address BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
-
- // Create an array index that will be used to walk over all of the
- // objects we're constructing.
- ArrayIndexVar = CreateMemTemp(getContext().getSizeType(), "object.index");
- llvm::Value *Zero =
- llvm::Constant::getNullValue(ArrayIndexVar.getElementType());
- Builder.CreateStore(Zero, ArrayIndexVar);
-
- // Emit the block variables for the array indices, if any.
- for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
- EmitAutoVarDecl(*ArrayIndexes[I]);
- }
-
- EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
- ArrayIndexes, 0);
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LHS,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(Init, Slot);
+ break;
}
}
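
Context for the deletion above: the ArrayIndexes machinery existed chiefly so implicitly-defined constructors could copy array members element by element; that initialization now lowers through EmitAggExpr like any other aggregate. A small example of the construct that used to exercise it (illustrative only):

    struct Elem {
      Elem() {}
      Elem(const Elem &) {}
    };

    struct Holder {
      Elem Arr[2][3];  // copied element by element by the implicit copy ctor
    };

    // Holder's implicitly-defined copy constructor is what previously needed
    // the nested index loops removed above.
    Holder duplicate(const Holder &H) { return H; }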
@@ -2146,10 +2027,12 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
*this, D, Type, ForVirtualBase, Delegating, Args);
// Emit the call.
- llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
+ llvm::Constant *CalleePtr =
+ CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
const CGFunctionInfo &Info =
- CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
- EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
+ CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, D);
+ EmitCall(Info, Callee, ReturnValueSlot(), Args);
// Generate vtable assumptions if we're constructing a complete object
// with a vtable. We don't do this for base subobjects for two reasons:
@@ -2765,8 +2648,8 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
llvm::Value *ValidVtable = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, AllVtables});
- EmitCheck(std::make_pair(TypeTest, M), "cfi_check_fail", StaticData,
- {CastedVTable, ValidVtable});
+ EmitCheck(std::make_pair(TypeTest, M), SanitizerHandler::CFICheckFail,
+ StaticData, {CastedVTable, ValidVtable});
}
bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
@@ -2798,38 +2681,13 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
llvm::Value *CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIVCall),
- "cfi_check_fail", nullptr, nullptr);
+ SanitizerHandler::CFICheckFail, nullptr, nullptr);
return Builder.CreateBitCast(
Builder.CreateExtractValue(CheckedLoad, 0),
cast<llvm::PointerType>(VTable->getType())->getElementType());
}
-// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
-// quite what we want.
-static const Expr *skipNoOpCastsAndParens(const Expr *E) {
- while (true) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- E = PE->getSubExpr();
- continue;
- }
-
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UO_Extension) {
- E = UO->getSubExpr();
- continue;
- }
- }
- return E;
- }
-}
-
bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
const CXXMethodDecl *MD) {
@@ -2838,31 +2696,41 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
if (getLangOpts().AppleKext)
return false;
- // If the most derived class is marked final, we know that no subclass can
- // override this member function and so we can devirtualize it. For example:
- //
- // struct A { virtual void f(); }
- // struct B final : A { };
- //
- // void f(B *b) {
- // b->f();
- // }
- //
- const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
- if (MostDerivedClassDecl->hasAttr<FinalAttr>())
- return true;
-
// If the member function is marked 'final', we know that it can't be
- // overridden and can therefore devirtualize it.
+ // overridden and can therefore devirtualize it unless it's pure virtual.
if (MD->hasAttr<FinalAttr>())
+ return !MD->isPure();
+
+ // If the base expression (after skipping derived-to-base conversions) is a
+ // class prvalue, then we can devirtualize.
+ Base = Base->getBestDynamicClassTypeExpr();
+ if (Base->isRValue() && Base->getType()->isRecordType())
+ return true;
+
+ // If we don't even know what we would call, we can't devirtualize.
+ const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
+ if (!BestDynamicDecl)
+ return false;
+
+ // There may be a method corresponding to MD in a derived class.
+ const CXXMethodDecl *DevirtualizedMethod =
+ MD->getCorrespondingMethodInClass(BestDynamicDecl);
+
+ // If that method is pure virtual, we can't devirtualize. If this code is
+ // reached, the result would be UB, not a direct call to the derived class
+ // function, and we can't assume the derived class function is defined.
+ if (DevirtualizedMethod->isPure())
+ return false;
+
+ // If that method is marked final, we can devirtualize it.
+ if (DevirtualizedMethod->hasAttr<FinalAttr>())
return true;
// Similarly, if the class itself is marked 'final' it can't be overridden
// and we can therefore devirtualize the member function call.
- if (MD->getParent()->hasAttr<FinalAttr>())
+ if (BestDynamicDecl->hasAttr<FinalAttr>())
return true;
- Base = skipNoOpCastsAndParens(Base);
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// This is a record decl. We know the type and can devirtualize it.
@@ -2879,17 +2747,15 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
return VD->getType()->isRecordType();
- // We can always devirtualize calls on temporary object expressions.
- if (isa<CXXConstructExpr>(Base))
- return true;
-
- // And calls on bound temporaries.
- if (isa<CXXBindTemporaryExpr>(Base))
- return true;
-
- // Check if this is a call expr that returns a record type.
- if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
- return CE->getCallReturnType(getContext())->isRecordType();
+ // Likewise for calls on an object accessed by a (non-reference) pointer to
+ // member access.
+ if (auto *BO = dyn_cast<BinaryOperator>(Base)) {
+ if (BO->isPtrMemOp()) {
+ auto *MPT = BO->getRHS()->getType()->castAs<MemberPointerType>();
+ if (MPT->getPointeeType()->isRecordType())
+ return true;
+ }
+ }
// We can't devirtualize the call.
return false;
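
The rewritten predicate is subtle enough to merit concrete cases. A hedged sampler of what it now accepts and rejects:

    struct A { virtual void f(); };
    struct B final : A { void f() override; };
    struct C : A { void f() final; };
    struct D : A { void f() override = 0; };

    void calls(B *b, C *c, D *d) {
      b->f();   // devirtualizable: B is final, so *b is dynamically a B
      c->f();   // devirtualizable: C::f is final and not pure
      d->f();   // not devirtualized: the best-known override is pure virtual
      A().f();  // devirtualizable: a class prvalue has a known dynamic type
    }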
@@ -2901,7 +2767,7 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// Get the address of the call operator.
const CGFunctionInfo &calleeFnInfo =
CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
- llvm::Value *callee =
+ llvm::Constant *calleePtr =
CGM.GetAddrOfFunction(GlobalDecl(callOperator),
CGM.getTypes().GetFunctionType(calleeFnInfo));
@@ -2920,8 +2786,8 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// variadic arguments.
// Now emit our call.
- RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
- callArgs, callOperator);
+ auto callee = CGCallee::forDirect(calleePtr, callOperator);
+ RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs);
// If necessary, copy the returned value into the slot.
if (!resultType->isVoidType() && returnSlot.isNull())
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index b3278b3b4fef..3666858e63d2 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -445,7 +445,7 @@ CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
for (size_t I = OldLifetimeExtendedSize,
E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
// Alignment should be guaranteed by the vptrs in the individual cleanups.
- assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
+ assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) &&
"misaligned cleanup stack entry");
LifetimeExtendedCleanupHeader &Header =
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 98d01b1326c9..2166490ec1fd 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -427,8 +427,7 @@ public:
// EHCleanupScope ought to have alignment equal to that -- not more
// (would be misaligned by the stack allocator), and not less (would
// break the appended classes).
-static_assert(llvm::AlignOf<EHCleanupScope>::Alignment ==
- EHScopeStack::ScopeStackAlignment,
+static_assert(alignof(EHCleanupScope) == EHScopeStack::ScopeStackAlignment,
"EHCleanupScope expected alignment");
/// An exceptions scope which filters exceptions thrown through it.
diff --git a/lib/CodeGen/CGCoroutine.cpp b/lib/CodeGen/CGCoroutine.cpp
new file mode 100644
index 000000000000..2fdb1279ece9
--- /dev/null
+++ b/lib/CodeGen/CGCoroutine.cpp
@@ -0,0 +1,116 @@
+//===----- CGCoroutine.cpp - Emit LLVM Code for C++ coroutines ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of coroutines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtCXX.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace clang {
+namespace CodeGen {
+
+struct CGCoroData {
+ // Stores the llvm.coro.id emitted in the function so that we can supply it
+ // as the first argument to coro.begin, coro.alloc and coro.free intrinsics.
+ // Note: llvm.coro.id returns a token that cannot be directly expressed in a
+ // builtin.
+ llvm::CallInst *CoroId = nullptr;
+ // If coro.id came from the builtin, remember the expression to give a
+ // better diagnostic. If CoroIdExpr is nullptr, the coro.id was created by
+ // EmitCoroutineBody.
+ CallExpr const *CoroIdExpr = nullptr;
+};
+}
+}
+
+clang::CodeGen::CodeGenFunction::CGCoroInfo::CGCoroInfo() {}
+CodeGenFunction::CGCoroInfo::~CGCoroInfo() {}
+
+static void createCoroData(CodeGenFunction &CGF,
+ CodeGenFunction::CGCoroInfo &CurCoro,
+ llvm::CallInst *CoroId,
+ CallExpr const *CoroIdExpr = nullptr) {
+ if (CurCoro.Data) {
+ if (CurCoro.Data->CoroIdExpr)
+ CGF.CGM.Error(CoroIdExpr->getLocStart(),
+ "only one __builtin_coro_id can be used in a function");
+ else if (CoroIdExpr)
+ CGF.CGM.Error(CoroIdExpr->getLocStart(),
+ "__builtin_coro_id shall not be used in a C++ coroutine");
+ else
+ llvm_unreachable("EmitCoroutineBodyStatement called twice?");
+
+ return;
+ }
+
+ CurCoro.Data = std::unique_ptr<CGCoroData>(new CGCoroData);
+ CurCoro.Data->CoroId = CoroId;
+ CurCoro.Data->CoroIdExpr = CoroIdExpr;
+}
+
+void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
+ auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
+ auto &TI = CGM.getContext().getTargetInfo();
+ unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();
+
+ auto *CoroId = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::coro_id),
+ {Builder.getInt32(NewAlign), NullPtr, NullPtr, NullPtr});
+ createCoroData(*this, CurCoro, CoroId);
+
+ EmitScalarExpr(S.getAllocate());
+ // FIXME: Emit the rest of the coroutine.
+ EmitStmt(S.getDeallocate());
+}
+
+// Emit coroutine intrinsic and patch up arguments of the token type.
+RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
+ unsigned int IID) {
+ SmallVector<llvm::Value *, 8> Args;
+ switch (IID) {
+ default:
+ break;
+ // The following three intrinsics take a token parameter referring to a token
+ // returned by an earlier call to @llvm.coro.id. Since we cannot represent
+ // it in builtins, we patch it up here.
+ case llvm::Intrinsic::coro_alloc:
+ case llvm::Intrinsic::coro_begin:
+ case llvm::Intrinsic::coro_free: {
+ if (CurCoro.Data && CurCoro.Data->CoroId) {
+ Args.push_back(CurCoro.Data->CoroId);
+ break;
+ }
+ CGM.Error(E->getLocStart(), "this builtin expects that __builtin_coro_id has"
+ " been used earlier in this function");
+ // Fallthrough to the next case to add TokenNone as the first argument.
+ }
+ // @llvm.coro.suspend takes a token parameter. Add token 'none' as the first
+ // argument.
+ case llvm::Intrinsic::coro_suspend:
+ Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext()));
+ break;
+ }
+ for (auto &Arg : E->arguments())
+ Args.push_back(EmitScalarExpr(Arg));
+
+ llvm::Value *F = CGM.getIntrinsic(IID);
+ llvm::CallInst *Call = Builder.CreateCall(F, Args);
+
+ // If we see @llvm.coro.id remember it in the CoroData. We will update
+ // coro.alloc, coro.begin and coro.free intrinsics to refer to it.
+ if (IID == llvm::Intrinsic::coro_id) {
+ createCoroData(*this, CurCoro, Call, E);
+ }
+ return RValue::get(Call);
+}
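
To make the token patch-up concrete, a sketch of the usage contract the diagnostics above police: __builtin_coro_id must be called first (and at most once), and CodeGen then threads its token into the later intrinsic calls. Every signature below is an assumption for illustration; only the builtin names appear in this patch:

    // Signatures assumed, not taken from this patch.
    void *frame_sketch(void *mem) {
      void *id = __builtin_coro_id(16, nullptr, nullptr, nullptr);  // must come first
      void *hdl = __builtin_coro_begin(mem);  // coro.id token spliced in by CodeGen
      __builtin_coro_free(hdl);               // likewise receives the token
      return id;
    }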
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 0607a5157a6f..12a68036b09c 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -13,9 +13,9 @@
#include "CGDebugInfo.h"
#include "CGBlocks.h"
-#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
@@ -31,6 +31,7 @@
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Constants.h"
@@ -40,10 +41,24 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
using namespace clang;
using namespace clang::CodeGen;
+static uint32_t getTypeAlignIfRequired(const Type *Ty, const ASTContext &Ctx) {
+ auto TI = Ctx.getTypeInfo(Ty);
+ return TI.AlignIsRequired ? TI.Align : 0;
+}
+
+static uint32_t getTypeAlignIfRequired(QualType Ty, const ASTContext &Ctx) {
+ return getTypeAlignIfRequired(Ty.getTypePtr(), Ctx);
+}
+
+static uint32_t getDeclAlignIfRequired(const Decl *D, const ASTContext &Ctx) {
+ return D->hasAttr<AlignedAttr>() ? D->getMaxAlignment() : 0;
+}
+
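These helpers encode a "record alignment only when the source demands it" policy: natural alignment is emitted as 0, keeping the debug info small. A hedged illustration of which declarations should produce a nonzero result, assuming the usual AlignIsRequired semantics:

    struct Natural { int x; };             // natural alignment -> helper yields 0
    struct alignas(32) Forced { int x; };  // explicit alignment -> nonzero
    typedef int AlignedInt __attribute__((aligned(16)));  // likewise nonzero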
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
: CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs),
@@ -306,11 +321,36 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
return StringRef();
}
+llvm::DIFile::ChecksumKind
+CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
+ Checksum.clear();
+
+ if (!CGM.getCodeGenOpts().EmitCodeView)
+ return llvm::DIFile::CSK_None;
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ bool Invalid;
+ llvm::MemoryBuffer *MemBuffer = SM.getBuffer(FID, &Invalid);
+ if (Invalid)
+ return llvm::DIFile::CSK_None;
+
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result Result;
+
+ Hash.update(MemBuffer->getBuffer());
+ Hash.final(Result);
+
+ Hash.stringifyResult(Result, Checksum);
+ return llvm::DIFile::CSK_MD5;
+}
+
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()));
+ remapDIPath(TheCU->getDirectory()),
+ TheCU->getFile()->getChecksumKind(),
+ TheCU->getFile()->getChecksum());
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
@@ -318,7 +358,9 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
// If the location is not valid then use main input file.
return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()));
+ remapDIPath(TheCU->getDirectory()),
+ TheCU->getFile()->getChecksumKind(),
+ TheCU->getFile()->getChecksum());
// Cache the results.
const char *fname = PLoc.getFilename();
@@ -330,8 +372,13 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return cast<llvm::DIFile>(V);
}
+ SmallString<32> Checksum;
+ llvm::DIFile::ChecksumKind CSKind =
+ computeChecksum(SM.getFileID(Loc), Checksum);
+
llvm::DIFile *F = DBuilder.createFile(remapDIPath(PLoc.getFilename()),
- remapDIPath(getCurrentDirname()));
+ remapDIPath(getCurrentDirname()),
+ CSKind, Checksum);
DIFileCache[fname].reset(F);
return F;
@@ -339,7 +386,9 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
llvm::DIFile *CGDebugInfo::getOrCreateMainFile() {
return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()));
+ remapDIPath(TheCU->getDirectory()),
+ TheCU->getFile()->getChecksumKind(),
+ TheCU->getFile()->getChecksum());
}
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
@@ -382,6 +431,8 @@ StringRef CGDebugInfo::getCurrentDirname() {
}
void CGDebugInfo::CreateCompileUnit() {
+ SmallString<32> Checksum;
+ llvm::DIFile::ChecksumKind CSKind = llvm::DIFile::CSK_None;
// Should we be asking the SourceManager for the main file name, instead of
// accepting it as an argument? This just causes the main file name to
@@ -408,6 +459,7 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::sys::path::append(MainFileDirSS, MainFileName);
MainFileName = MainFileDirSS.str();
}
+ CSKind = computeChecksum(SM.getMainFileID(), Checksum);
}
llvm::dwarf::SourceLanguage LangTag;
@@ -452,9 +504,12 @@ void CGDebugInfo::CreateCompileUnit() {
// Create new compile unit.
// FIXME - Eliminate TheCU.
TheCU = DBuilder.createCompileUnit(
- LangTag, remapDIPath(MainFileName), remapDIPath(getCurrentDirname()),
+ LangTag, DBuilder.createFile(remapDIPath(MainFileName),
+ remapDIPath(getCurrentDirname()), CSKind,
+ Checksum),
Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers,
- CGM.getCodeGenOpts().SplitDwarfFile, EmissionKind, 0 /* DWOid */);
+ CGM.getCodeGenOpts().SplitDwarfFile, EmissionKind, 0 /* DWOid */,
+ CGM.getCodeGenOpts().SplitDwarfInlining);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -494,14 +549,14 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
auto *ISATy = DBuilder.createPointerType(ClassTy, Size);
- ObjTy =
- DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(),
- 0, 0, 0, 0, nullptr, llvm::DINodeArray());
+ ObjTy = DBuilder.createStructType(
+ TheCU, "objc_object", getOrCreateMainFile(), 0, 0, 0,
+ llvm::DINode::FlagZero, nullptr, llvm::DINodeArray());
DBuilder.replaceArrays(
- ObjTy,
- DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
- ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
+ ObjTy, DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
+ ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0,
+ llvm::DINode::FlagZero, ISATy)));
return ObjTy;
}
case BuiltinType::ObjCSel: {
@@ -518,9 +573,8 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
SingletonId);
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
- return DBuilder.createBasicType(
- "opencl_sampler_t", CGM.getContext().getTypeSize(BT),
- CGM.getContext().getTypeAlign(BT), llvm::dwarf::DW_ATE_unsigned);
+ return getOrCreateStructPtrType("opencl_sampler_t",
+ OCLSamplerDITy);
case BuiltinType::OCLEvent:
return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
case BuiltinType::OCLClkEvent:
@@ -594,21 +648,19 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
BTName = BT->getName(CGM.getLangOpts());
break;
}
- // Bit size, align and offset of the type.
+ // Bit size and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
- uint64_t Align = CGM.getContext().getTypeAlign(BT);
- return DBuilder.createBasicType(BTName, Size, Align, Encoding);
+ return DBuilder.createBasicType(BTName, Size, Encoding);
}
llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
- // Bit size, align and offset of the type.
+ // Bit size and offset of the type.
llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float;
if (Ty->isComplexIntegerType())
Encoding = llvm::dwarf::DW_ATE_lo_user;
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return DBuilder.createBasicType("complex", Size, Align, Encoding);
+ return DBuilder.createBasicType("complex", Size, Encoding);
}
llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty,
@@ -721,13 +773,7 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
StringRef RDName = getClassName(RD);
uint64_t Size = 0;
- uint64_t Align = 0;
-
- const RecordDecl *D = RD->getDefinition();
- if (D && D->isCompleteDefinition()) {
- Size = CGM.getContext().getTypeSize(Ty);
- Align = CGM.getContext().getTypeAlign(Ty);
- }
+ uint32_t Align = 0;
// Create the type.
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
@@ -749,7 +795,7 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
// because that does not return the correct value for references.
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
uint64_t Size = CGM.getTarget().getPointerWidth(AS);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
if (Tag == llvm::dwarf::DW_TAG_reference_type ||
Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
@@ -776,7 +822,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
SmallVector<llvm::Metadata *, 8> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
- unsigned FieldAlign;
+ uint32_t FieldAlign;
llvm::DINodeArray Elements;
FieldOffset = 0;
@@ -787,7 +833,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
Elements = DBuilder.getOrCreateArray(EltTys);
EltTys.clear();
- unsigned Flags = llvm::DINode::FlagAppleBlock;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagAppleBlock;
unsigned LineNo = 0;
auto *EltTy =
@@ -811,9 +857,9 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- EltTys.push_back(DBuilder.createMemberType(Unit, "__descriptor", nullptr, LineNo,
- FieldSize, FieldAlign, FieldOffset,
- 0, DescTy));
+ EltTys.push_back(DBuilder.createMemberType(
+ Unit, "__descriptor", nullptr, LineNo, FieldSize, FieldAlign, FieldOffset,
+ llvm::DINode::FlagZero, DescTy));
FieldOffset += FieldSize;
Elements = DBuilder.getOrCreateArray(EltTys);
@@ -893,6 +939,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_Swift:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_X86RegCall:
return 0;
}
return 0;
@@ -917,14 +964,15 @@ llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
}
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
- return DBuilder.createSubroutineType(EltTypeArray, 0,
+ return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
getDwarfCC(Ty->getCallConv()));
}
/// Convert an AccessSpecifier into the corresponding DINode flag.
/// As an optimization, return 0 if the access specifier equals the
/// default for the containing type.
-static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
+static llvm::DINode::DIFlags getAccessFlag(AccessSpecifier Access,
+ const RecordDecl *RD) {
AccessSpecifier Default = clang::AS_none;
if (RD && RD->isClass())
Default = clang::AS_private;
@@ -932,7 +980,7 @@ static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
Default = clang::AS_public;
if (Access == Default)
- return 0;
+ return llvm::DINode::FlagZero;
switch (Access) {
case clang::AS_private:
@@ -942,7 +990,7 @@ static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
case clang::AS_public:
return llvm::DINode::FlagPublic;
case clang::AS_none:
- return 0;
+ return llvm::DINode::FlagZero;
}
llvm_unreachable("unexpected access enumerator");
}
@@ -964,21 +1012,20 @@ llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
CGM.getTypes().getCGRecordLayout(RD).getBitFieldInfo(BitFieldDecl);
uint64_t SizeInBits = BitFieldInfo.Size;
assert(SizeInBits > 0 && "found named 0-width bitfield");
- unsigned AlignInBits = CGM.getContext().getTypeAlign(Ty);
uint64_t StorageOffsetInBits =
CGM.getContext().toBits(BitFieldInfo.StorageOffset);
uint64_t OffsetInBits = StorageOffsetInBits + BitFieldInfo.Offset;
- unsigned Flags = getAccessFlag(BitFieldDecl->getAccess(), RD);
+ llvm::DINode::DIFlags Flags = getAccessFlag(BitFieldDecl->getAccess(), RD);
return DBuilder.createBitFieldMemberType(
- RecordTy, Name, File, Line, SizeInBits, AlignInBits, OffsetInBits,
- StorageOffsetInBits, Flags, DebugType);
+ RecordTy, Name, File, Line, SizeInBits, OffsetInBits, StorageOffsetInBits,
+ Flags, DebugType);
}
llvm::DIType *
CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
AccessSpecifier AS, uint64_t offsetInBits,
- llvm::DIFile *tunit, llvm::DIScope *scope,
- const RecordDecl *RD) {
+ uint32_t AlignInBits, llvm::DIFile *tunit,
+ llvm::DIScope *scope, const RecordDecl *RD) {
llvm::DIType *debugType = getOrCreateType(type, tunit);
// Get the location for the field.
@@ -986,16 +1033,17 @@ CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
unsigned line = getLineNumber(loc);
uint64_t SizeInBits = 0;
- unsigned AlignInBits = 0;
+ auto Align = AlignInBits;
if (!type->isIncompleteArrayType()) {
TypeInfo TI = CGM.getContext().getTypeInfo(type);
SizeInBits = TI.Width;
- AlignInBits = TI.Align;
+ if (!Align)
+ Align = getTypeAlignIfRequired(type, CGM.getContext());
}
- unsigned flags = getAccessFlag(AS, RD);
+ llvm::DINode::DIFlags flags = getAccessFlag(AS, RD);
return DBuilder.createMemberType(scope, name, file, line, SizeInBits,
- AlignInBits, offsetInBits, flags, debugType);
+ Align, offsetInBits, flags, debugType);
}
void CGDebugInfo::CollectRecordLambdaFields(
@@ -1017,9 +1065,10 @@ void CGDebugInfo::CollectRecordLambdaFields(
VarDecl *V = C.getCapturedVar();
StringRef VName = V->getName();
llvm::DIFile *VUnit = getOrCreateFile(Loc);
+ auto Align = getDeclAlignIfRequired(V, CGM.getContext());
llvm::DIType *FieldType = createFieldType(
VName, Field->getType(), Loc, Field->getAccess(),
- layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl);
+ layout.getFieldOffset(fieldno), Align, VUnit, RecordTy, CXXDecl);
elements.push_back(FieldType);
} else if (C.capturesThis()) {
// TODO: Need to handle 'this' in some way by probably renaming the
@@ -1060,9 +1109,10 @@ CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy,
}
}
- unsigned Flags = getAccessFlag(Var->getAccess(), RD);
+ llvm::DINode::DIFlags Flags = getAccessFlag(Var->getAccess(), RD);
+ auto Align = getDeclAlignIfRequired(Var, CGM.getContext());
llvm::DIDerivedType *GV = DBuilder.createStaticMemberType(
- RecordTy, VName, VUnit, LineNumber, VTy, Flags, C);
+ RecordTy, VName, VUnit, LineNumber, VTy, Flags, C, Align);
StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV);
return GV;
}
@@ -1082,14 +1132,26 @@ void CGDebugInfo::CollectRecordNormalField(
if (field->isBitField()) {
FieldType = createBitFieldType(field, RecordTy, RD);
} else {
+ auto Align = getDeclAlignIfRequired(field, CGM.getContext());
FieldType =
createFieldType(name, type, field->getLocation(), field->getAccess(),
- OffsetInBits, tunit, RecordTy, RD);
+ OffsetInBits, Align, tunit, RecordTy, RD);
}
elements.push_back(FieldType);
}
+void CGDebugInfo::CollectRecordNestedRecord(
+ const RecordDecl *RD, SmallVectorImpl<llvm::Metadata *> &elements) {
+ QualType Ty = CGM.getContext().getTypeDeclType(RD);
+ // Injected class names are not considered nested records.
+ if (isa<InjectedClassNameType>(Ty))
+ return;
+ SourceLocation Loc = RD->getLocation();
+ llvm::DIType *nestedType = getOrCreateType(Ty, getOrCreateFile(Loc));
+ elements.push_back(nestedType);
+}
+
void CGDebugInfo::CollectRecordFields(
const RecordDecl *record, llvm::DIFile *tunit,
SmallVectorImpl<llvm::Metadata *> &elements,
@@ -1101,6 +1163,10 @@ void CGDebugInfo::CollectRecordFields(
else {
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
+ // Debug info for nested records is included in the member list only for
+ // CodeView.
+ bool IncludeNestedRecords = CGM.getCodeGenOpts().EmitCodeView;
+
// Field number for non-static fields.
unsigned fieldNo = 0;
@@ -1126,7 +1192,10 @@ void CGDebugInfo::CollectRecordFields(
// Bump field number for next field.
++fieldNo;
- }
+ } else if (const auto *nestedRec = dyn_cast<CXXRecordDecl>(I))
+ if (IncludeNestedRecords && !nestedRec->isImplicit() &&
+ nestedRec->getDeclContext() == record)
+ CollectRecordNestedRecord(nestedRec, elements);
}
}
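A small illustrative case of what the new nested-record path picks up: when emitting CodeView, the inner class below is appended to the outer class's member list so Microsoft debuggers can resolve the nested name, while the injected class name (`Outer::Outer`) is filtered out by the `InjectedClassNameType` check above.

```cpp
// Illustrative only; any non-implicit class directly nested in Outer
// qualifies.
struct Outer {
  struct Inner {  // collected by CollectRecordNestedRecord under CodeView
    int I;
  };
  int O;
};
```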
@@ -1162,7 +1231,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
QualType PointeeTy = ThisPtrTy->getPointeeType();
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
uint64_t Size = CGM.getTarget().getPointerWidth(AS);
- uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
+ auto Align = getTypeAlignIfRequired(ThisPtrTy, CGM.getContext());
llvm::DIType *PointeeType = getOrCreateType(PointeeTy, Unit);
llvm::DIType *ThisPtrType =
DBuilder.createPointerType(PointeeType, Size, Align);
@@ -1185,7 +1254,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
Flags |= llvm::DINode::FlagLValueReference;
if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
@@ -1236,7 +1305,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
llvm::DIType *ContainingType = nullptr;
unsigned Virtuality = 0;
unsigned VIndex = 0;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
int ThisAdjustment = 0;
if (Method->isVirtual()) {
@@ -1347,13 +1416,33 @@ void CGDebugInfo::CollectCXXMemberFunctions(
void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit,
SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DIType *RecordTy) {
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- for (const auto &BI : RD->bases()) {
- unsigned BFlags = 0;
- uint64_t BaseOffset;
+ llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> SeenTypes;
+ CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->bases(), SeenTypes,
+ llvm::DINode::FlagZero);
+
+ // If we are generating CodeView debug info, we also need to emit records for
+ // indirect virtual base classes.
+ if (CGM.getCodeGenOpts().EmitCodeView) {
+ CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->vbases(), SeenTypes,
+ llvm::DINode::FlagIndirectVirtualBase);
+ }
+}
+void CGDebugInfo::CollectCXXBasesAux(
+ const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy,
+ const CXXRecordDecl::base_class_const_range &Bases,
+ llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
+ llvm::DINode::DIFlags StartingFlags) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (const auto &BI : Bases) {
const auto *Base =
cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
+ if (!SeenTypes.insert(Base).second)
+ continue;
+ auto *BaseTy = getOrCreateType(BI.getType(), Unit);
+ llvm::DINode::DIFlags BFlags = StartingFlags;
+ uint64_t BaseOffset;
if (BI.isVirtual()) {
if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
@@ -1368,15 +1457,15 @@ void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit,
BaseOffset =
4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base);
}
- BFlags = llvm::DINode::FlagVirtual;
+ BFlags |= llvm::DINode::FlagVirtual;
} else
BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
// FIXME: Inconsistent units for BaseOffset. It is in bytes when
// BI->isVirtual() and bits when not.
BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD);
- llvm::DIType *DTy = DBuilder.createInheritance(
- RecordTy, getOrCreateType(BI.getType(), Unit), BaseOffset, BFlags);
+ llvm::DIType *DTy =
+ DBuilder.createInheritance(RecordTy, BaseTy, BaseOffset, BFlags);
EltTys.push_back(DTy);
}
}
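The `SeenTypes.insert(Base).second` test is the standard set-insertion idiom: `insert` returns an iterator/bool pair whose bool is false when the element was already present. A self-contained illustration of how that keeps a base from being emitted twice when it is reachable both directly and through `vbases()`:

```cpp
#include <cstdio>
#include <set>

int main() {
  // Stand-in for DenseSet<CanonicalDeclPtr<const CXXRecordDecl>>.
  std::set<int> Seen;
  int Bases[] = {1, 2, 1, 3};  // '1' shows up as direct and indirect base
  for (int B : Bases) {
    if (!Seen.insert(B).second)
      continue;  // already recorded; skip the duplicate
    std::printf("emit inheritance record for base %d\n", B);
  }
}
```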
@@ -1531,22 +1620,56 @@ StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
}
void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
- SmallVectorImpl<llvm::Metadata *> &EltTys) {
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DICompositeType *RecordTy) {
+ // If this class is not dynamic then there is no vtable info to collect.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Don't emit any vtable shape or vptr info if this class doesn't have an
+ // extendable vfptr. This can happen if the class doesn't have virtual
+ // methods, or in the MS ABI if those virtual methods only come from virtually
+ // inherited bases.
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (!RL.hasExtendableVFPtr())
+ return;
- // If there is a primary base then it will hold vtable info.
+ // CodeView needs to know how large the vtable of every dynamic class is, so
+ // emit a special named pointer type into the element list. The vptr type
+ // points to this type as well.
+ llvm::DIType *VPtrTy = nullptr;
+ bool NeedVTableShape = CGM.getCodeGenOpts().EmitCodeView &&
+ CGM.getTarget().getCXXABI().isMicrosoft();
+ if (NeedVTableShape) {
+ uint64_t PtrWidth =
+ CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ const VTableLayout &VFTLayout =
+ CGM.getMicrosoftVTableContext().getVFTableLayout(RD, CharUnits::Zero());
+ unsigned VSlotCount =
+ VFTLayout.vtable_components().size() - CGM.getLangOpts().RTTIData;
+ unsigned VTableWidth = PtrWidth * VSlotCount;
+
+ // Create a very wide void* type and insert it directly in the element list.
+ llvm::DIType *VTableType =
+ DBuilder.createPointerType(nullptr, VTableWidth, 0, "__vtbl_ptr_type");
+ EltTys.push_back(VTableType);
+
+ // The vptr is a pointer to this special vtable type.
+ VPtrTy = DBuilder.createPointerType(VTableType, PtrWidth);
+ }
+
+ // If there is a primary base then the artificial vptr member lives there.
if (RL.getPrimaryBase())
return;
- // If this class is not dynamic then there is not any vtable info to collect.
- if (!RD->isDynamicClass())
- return;
+ if (!VPtrTy)
+ VPtrTy = getOrCreateVTablePtrType(Unit);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType *VPTR = DBuilder.createMemberType(
+ llvm::DIType *VPtrMember = DBuilder.createMemberType(
Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
- llvm::DINode::FlagArtificial, getOrCreateVTablePtrType(Unit));
- EltTys.push_back(VPTR);
+ llvm::DINode::FlagArtificial, VPtrTy);
+ EltTys.push_back(VPtrMember);
}
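The vtable-shape computation above is plain arithmetic; a worked example with assumed numbers (64-bit target, a four-component vftable, RTTI data enabled) shows how the RTTI entry is excluded from the advertised slot count:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t PtrWidth = 64;   // bits in void* (assumed target)
  unsigned Components = 4;  // VFTLayout.vtable_components().size()
  unsigned RTTIData = 1;    // CGM.getLangOpts().RTTIData
  unsigned VSlotCount = Components - RTTIData;   // 3 callable slots
  uint64_t VTableWidth = PtrWidth * VSlotCount;  // 192 bits
  std::printf("__vtbl_ptr_type: %u slots, %llu bits\n", VSlotCount,
              (unsigned long long)VTableWidth);
}
```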
llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy,
@@ -1591,23 +1714,6 @@ void CGDebugInfo::completeType(const RecordDecl *RD) {
completeRequiredType(RD);
}
-void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
- return;
-
- if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
- if (CXXDecl->isDynamicClass())
- return;
-
- if (DebugTypeExtRefs && RD->isFromASTFile())
- return;
-
- QualType Ty = CGM.getContext().getRecordType(RD);
- llvm::DIType *T = getTypeOrNull(Ty);
- if (T && T->isForwardDecl())
- completeClassData(RD);
-}
-
void CGDebugInfo::completeClassData(const RecordDecl *RD) {
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
@@ -1633,21 +1739,37 @@ static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
/// Does a type definition exist in an imported clang module?
static bool isDefinedInClangModule(const RecordDecl *RD) {
+ // Only definitions that were imported from an AST file come from a module.
if (!RD || !RD->isFromASTFile())
return false;
+ // Anonymous entities cannot be addressed. Treat them as not coming from a module.
if (!RD->isExternallyVisible() && RD->getName().empty())
return false;
if (auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) {
- assert(CXXDecl->isCompleteDefinition() && "incomplete record definition");
- if (CXXDecl->getTemplateSpecializationKind() != TSK_Undeclared)
- // Make sure the instantiation is actually in a module.
- if (CXXDecl->field_begin() != CXXDecl->field_end())
- return CXXDecl->field_begin()->isFromASTFile();
+ if (!CXXDecl->isCompleteDefinition())
+ return false;
+ auto TemplateKind = CXXDecl->getTemplateSpecializationKind();
+ if (TemplateKind != TSK_Undeclared) {
+ // This is a template; check the origin of the first member.
+ if (CXXDecl->field_begin() == CXXDecl->field_end())
+ return TemplateKind == TSK_ExplicitInstantiationDeclaration;
+ if (!CXXDecl->field_begin()->isFromASTFile())
+ return false;
+ }
}
-
return true;
}
+/// Return true if the class or any of its methods are marked dllimport.
+static bool isClassOrMethodDLLImport(const CXXRecordDecl *RD) {
+ if (RD->hasAttr<DLLImportAttr>())
+ return true;
+ for (const CXXMethodDecl *MD : RD->methods())
+ if (MD->hasAttr<DLLImportAttr>())
+ return true;
+ return false;
+}
+
static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
bool DebugTypeExtRefs, const RecordDecl *RD,
const LangOptions &LangOpts) {
@@ -1668,7 +1790,14 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
if (!CXXDecl)
return false;
- if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass())
+ // Only emit complete debug info for a dynamic class when its vtable is
+ // emitted. However, Microsoft debuggers don't resolve type information
+ // across DLL boundaries, so skip this optimization if the class or any of its
+ // methods are marked dllimport. This isn't a complete solution, since objects
+ // without any dllimport methods can be used in one DLL and constructed in
+ // another, but it is the current behavior of LimitedDebugInfo.
+ if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass() &&
+ !isClassOrMethodDLLImport(CXXDecl))
return true;
TemplateSpecializationKind Spec = TSK_Undeclared;
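Concretely, the carve-out above keeps classes like the following fully described in every importing TU; under vtable-based type homing the definition would otherwise live only in the DLL that emits the vtable, where Microsoft debuggers cannot find it. This is an illustrative example, not code from the patch:

```cpp
// e.g. clang-cl -c -Z7 user.cpp (assumed invocation)
struct __declspec(dllimport) ImportedBase {
  virtual void f();  // vtable (and, previously, debug info) live in the DLL
};

ImportedBase *use(ImportedBase *B) {
  B->f();  // this TU now also carries ImportedBase's full type info
  return B;
}
```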
@@ -1683,6 +1812,16 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
return false;
}
+void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
+ if (shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts()))
+ return;
+
+ QualType Ty = CGM.getContext().getRecordType(RD);
+ llvm::DIType *T = getTypeOrNull(Ty);
+ if (T && T->isForwardDecl())
+ completeClassData(RD);
+}
+
llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
@@ -1732,7 +1871,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
- CollectVTableInfo(CXXDecl, DefUnit, EltTys);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys, FwdDecl);
}
// Collect data fields (including static variables and any initializers).
@@ -1760,6 +1899,18 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty,
return getOrCreateType(Ty->getBaseType(), Unit);
}
+llvm::DIType *CGDebugInfo::CreateType(const ObjCTypeParamType *Ty,
+ llvm::DIFile *Unit) {
+ // Ignore protocols.
+ SourceLocation Loc = Ty->getDecl()->getLocation();
+
+ // Use Typedefs to represent ObjCTypeParamType.
+ return DBuilder.createTypedef(
+ getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit),
+ Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc),
+ getDeclContextDescriptor(Ty->getDecl()));
+}
+
/// \return true if Getter has the default name for the property PD.
static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
const ObjCMethodDecl *Getter) {
@@ -1860,10 +2011,11 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
// but LLVM detects skeleton CUs by looking for a non-zero DWO id.
uint64_t Signature = Mod.getSignature() ? Mod.getSignature() : ~1ULL;
llvm::DIBuilder DIB(CGM.getModule());
- DIB.createCompileUnit(TheCU->getSourceLanguage(), Mod.getModuleName(),
- Mod.getPath(), TheCU->getProducer(), true,
- StringRef(), 0, Mod.getASTFile(),
- llvm::DICompileUnit::FullDebug, Signature);
+ DIB.createCompileUnit(TheCU->getSourceLanguage(),
+ DIB.createFile(Mod.getModuleName(), Mod.getPath()),
+ TheCU->getProducer(), true, StringRef(), 0,
+ Mod.getASTFile(), llvm::DICompileUnit::FullDebug,
+ Signature);
DIB.finalize();
}
llvm::DIModule *Parent =
@@ -1887,9 +2039,9 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (ID->getImplementation())
Flags |= llvm::DINode::FlagObjcClassComplete;
@@ -1915,7 +2067,8 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
if (!SClassTy)
return nullptr;
- llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
+ llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0,
+ llvm::DINode::FlagZero);
EltTys.push_back(InhTag);
}
@@ -1970,7 +2123,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
unsigned FieldLine = getLineNumber(Field->getLocation());
QualType FType = Field->getType();
uint64_t FieldSize = 0;
- unsigned FieldAlign = 0;
+ uint32_t FieldAlign = 0;
if (!FType->isIncompleteArrayType()) {
@@ -1978,7 +2131,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
FieldSize = Field->isBitField()
? Field->getBitWidthValue(CGM.getContext())
: CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
+ FieldAlign = getTypeAlignIfRequired(FType, CGM.getContext());
}
uint64_t FieldOffset;
@@ -1997,7 +2150,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
FieldOffset = RL.getFieldOffset(FieldNo);
}
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
Flags = llvm::DINode::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
@@ -2052,33 +2205,33 @@ llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
}
llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
uint64_t Size;
- uint64_t Align;
+ uint32_t Align;
// FIXME: make getTypeAlign() aware of VLAs and incomplete array types
if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
- Align =
- CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ Align = getTypeAlignIfRequired(CGM.getContext().getBaseElementType(VAT),
+ CGM.getContext());
} else if (Ty->isIncompleteArrayType()) {
Size = 0;
if (Ty->getElementType()->isIncompleteType())
Align = 0;
else
- Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ Align = getTypeAlignIfRequired(Ty->getElementType(), CGM.getContext());
} else if (Ty->isIncompleteType()) {
Size = 0;
Align = 0;
} else {
// Size and align of the whole array, not the element type.
Size = CGM.getContext().getTypeSize(Ty);
- Align = CGM.getContext().getTypeAlign(Ty);
+ Align = getTypeAlignIfRequired(Ty, CGM.getContext());
}
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
@@ -2097,6 +2250,13 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
int64_t Count = -1; // Count == -1 is an unbounded array.
if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty))
Count = CAT->getSize().getZExtValue();
+ else if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ if (Expr *Size = VAT->getSizeExpr()) {
+ llvm::APSInt V;
+ if (Size->EvaluateAsInt(V, CGM.getContext()))
+ Count = V.getExtValue();
+ }
+ }
// FIXME: Verify this is right for VLAs.
Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
@@ -2123,7 +2283,7 @@ llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIFile *U) {
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
uint64_t Size = 0;
if (!Ty->isIncompleteType()) {
@@ -2163,9 +2323,8 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
}
llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) {
- // Ignore the atomic wrapping
- // FIXME: What is the correct representation?
- return getOrCreateType(Ty->getValueType(), U);
+ auto *FromTy = getOrCreateType(Ty->getValueType(), U);
+ return DBuilder.createQualifiedType(llvm::dwarf::DW_TAG_atomic_type, FromTy);
}
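Rather than silently stripping `_Atomic`, the debug info now wraps the value type in a DWARF v5 qualifier, the same shape `const` and `volatile` already use. A hedged standalone sketch of the call, assuming the LLVM headers of this era:

```cpp
#include "llvm/IR/DIBuilder.h"
#include "llvm/Support/Dwarf.h"  // DW_TAG_atomic_type (DWARF v5)

// '_Atomic(int)' becomes DW_TAG_atomic_type -> int instead of plain int.
llvm::DIType *wrapAtomic(llvm::DIBuilder &DB, llvm::DIType *ValueTy) {
  return DB.createQualifiedType(llvm::dwarf::DW_TAG_atomic_type, ValueTy);
}
```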
llvm::DIType* CGDebugInfo::CreateType(const PipeType *Ty,
@@ -2177,10 +2336,10 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
const EnumDecl *ED = Ty->getDecl();
uint64_t Size = 0;
- uint64_t Align = 0;
+ uint32_t Align = 0;
if (!ED->getTypeForDecl()->isIncompleteType()) {
Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
@@ -2220,10 +2379,10 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
const EnumDecl *ED = Ty->getDecl();
uint64_t Size = 0;
- uint64_t Align = 0;
+ uint32_t Align = 0;
if (!ED->getTypeForDecl()->isIncompleteType()) {
Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
@@ -2292,12 +2451,18 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
- case Type::Auto:
+ case Type::Auto: {
QualType DT = cast<AutoType>(T)->getDeducedType();
assert(!DT.isNull() && "Undeduced types shouldn't reach here.");
T = DT;
break;
}
+ case Type::Adjusted:
+ case Type::Decayed:
+ // Decayed and adjusted types use the adjusted type in LLVM and DWARF.
+ T = cast<AdjustedType>(T)->getAdjustedType();
+ break;
+ }
assert(T != LastT && "Type unwrapping failed to unwrap!");
(void)LastT;
@@ -2406,6 +2571,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
case Type::ObjCObject:
return CreateType(cast<ObjCObjectType>(Ty), Unit);
+ case Type::ObjCTypeParam:
+ return CreateType(cast<ObjCTypeParamType>(Ty), Unit);
case Type::ObjCInterface:
return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
case Type::Builtin:
@@ -2414,11 +2581,6 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
return CreateType(cast<ComplexType>(Ty));
case Type::Pointer:
return CreateType(cast<PointerType>(Ty), Unit);
- case Type::Adjusted:
- case Type::Decayed:
- // Decayed and adjusted types use the adjusted type in LLVM and DWARF.
- return CreateType(
- cast<PointerType>(cast<AdjustedType>(Ty)->getAdjustedType()), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
case Type::Typedef:
@@ -2454,6 +2616,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Auto:
case Type::Attributed:
+ case Type::Adjusted:
+ case Type::Decayed:
case Type::Elaborated:
case Type::Paren:
case Type::SubstTemplateTypeParm:
@@ -2518,13 +2682,13 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
return getOrCreateRecordFwdDecl(Ty, RDContext);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getDeclAlignIfRequired(D, CGM.getContext());
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType(
- getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align, 0,
- FullName);
+ getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align,
+ llvm::DINode::FlagZero, FullName);
// Elements of composite types usually have references back to the type, creating
// uniquing cycles. Distinct nodes are more efficient.
@@ -2587,9 +2751,10 @@ llvm::DIType *CGDebugInfo::CreateMemberType(llvm::DIFile *Unit, QualType FType,
StringRef Name, uint64_t *Offset) {
llvm::DIType *FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
- unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType *Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize,
- FieldAlign, *Offset, 0, FieldTy);
+ auto FieldAlign = getTypeAlignIfRequired(FType, CGM.getContext());
+ llvm::DIType *Ty =
+ DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize, FieldAlign,
+ *Offset, llvm::DINode::FlagZero, FieldTy);
*Offset += FieldSize;
return Ty;
}
@@ -2599,7 +2764,7 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
StringRef &LinkageName,
llvm::DIScope *&FDContext,
llvm::DINodeArray &TParamsArray,
- unsigned &Flags) {
+ llvm::DINode::DIFlags &Flags) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
Name = getFunctionName(FD);
// Use mangled name as linkage name for C/C++ functions.
@@ -2624,6 +2789,9 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
llvm::DIScope *Mod = getParentModuleOrNull(RDecl);
FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU);
}
+ // Check if it is a noreturn-marked function
+ if (FD->isNoReturn())
+ Flags |= llvm::DINode::FlagNoReturn;
// Collect template parameters.
TParamsArray = CollectFunctionTemplateParams(FD, Unit);
}
@@ -2680,7 +2848,7 @@ llvm::DISubprogram *
CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
llvm::DINodeArray TParamsArray;
StringRef Name, LinkageName;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
SourceLocation Loc = FD->getLocation();
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
@@ -2717,9 +2885,10 @@ CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
unsigned Line = getLineNumber(Loc);
collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext);
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
auto *GV = DBuilder.createTempGlobalVariableFwdDecl(
DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit),
- !VD->isExternallyVisible(), nullptr, nullptr);
+ !VD->isExternallyVisible(), nullptr, Align);
FwdDeclReplaceMap.emplace_back(
std::piecewise_construct,
std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
@@ -2737,8 +2906,12 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
getOrCreateFile(TD->getLocation()));
auto I = DeclCache.find(D->getCanonicalDecl());
- if (I != DeclCache.end())
- return dyn_cast_or_null<llvm::DINode>(I->second);
+ if (I != DeclCache.end()) {
+ auto N = I->second;
+ if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(N))
+ return GVE->getVariable();
+ return dyn_cast_or_null<llvm::DINode>(N);
+ }
// No definition for now. Emit a forward definition that might be
// merged with a potential upcoming definition.
@@ -2834,7 +3007,8 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
Elts.push_back(DBuilder.createUnspecifiedParameter());
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
- return DBuilder.createSubroutineType(EltTypeArray, 0, getDwarfCC(CC));
+ return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
+ getDwarfCC(CC));
}
// Handle variadic function types; they need an additional
@@ -2848,7 +3022,8 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
EltTys.push_back(getOrCreateType(ParamType, F));
EltTys.push_back(DBuilder.createUnspecifiedParameter());
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
- return DBuilder.createSubroutineType(EltTypeArray, 0, getDwarfCC(CC));
+ return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
+ getDwarfCC(CC));
}
return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
@@ -2866,7 +3041,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
const Decl *D = GD.getDecl();
bool HasDecl = (D != nullptr);
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *FDContext = Unit;
llvm::DINodeArray TParamsArray;
@@ -2899,9 +3074,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (!HasDecl || D->isImplicit()) {
Flags |= llvm::DINode::FlagArtificial;
- // Artificial functions without a location should not silently reuse CurLoc.
- if (Loc.isInvalid())
- CurLoc = SourceLocation();
+ // Artificial functions should not silently reuse CurLoc.
+ CurLoc = SourceLocation();
}
unsigned LineNo = getLineNumber(Loc);
unsigned ScopeLine = getLineNumber(ScopeLoc);
@@ -2939,7 +3113,7 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
if (!D)
return;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *FDContext = getDeclContextDescriptor(D);
llvm::DINodeArray TParamsArray;
@@ -3042,7 +3216,7 @@ llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
SmallVector<llvm::Metadata *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
- unsigned FieldAlign;
+ uint32_t FieldAlign;
llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
QualType Type = VD->getType();
@@ -3096,13 +3270,14 @@ llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
*XOffset = FieldOffset;
FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit, 0, FieldSize,
- FieldAlign, FieldOffset, 0, FieldTy);
+ FieldAlign, FieldOffset,
+ llvm::DINode::FlagZero, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
- unsigned Flags = llvm::DINode::FlagBlockByrefStruct;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagBlockByrefStruct;
return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
nullptr, Elements);
@@ -3142,9 +3317,12 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
Column = getColumnNumber(VD->getLocation());
}
SmallVector<int64_t, 9> Expr;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (VD->isImplicit())
Flags |= llvm::DINode::FlagArtificial;
+
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
+
// If this is the first argument and it is implicit then
// give it an object pointer flag.
// FIXME: There has to be a better way to do this, but for static
@@ -3179,7 +3357,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
? DBuilder.createParameterVariable(Scope, VD->getName(),
*ArgNo, Unit, Line, Ty)
: DBuilder.createAutoVariable(Scope, VD->getName(), Unit,
- Line, Ty);
+ Line, Ty, Align);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -3209,9 +3387,10 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
continue;
// Use VarDecl's Tag, Scope and Line number.
+ auto FieldAlign = getDeclAlignIfRequired(Field, CGM.getContext());
auto *D = DBuilder.createAutoVariable(
Scope, FieldName, Unit, Line, FieldTy, CGM.getLangOpts().Optimize,
- Flags | llvm::DINode::FlagArtificial);
+ Flags | llvm::DINode::FlagArtificial, FieldAlign);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -3222,13 +3401,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
}
// Create the descriptor for the variable.
- auto *D =
- ArgNo
- ? DBuilder.createParameterVariable(Scope, Name, *ArgNo, Unit, Line,
- Ty, CGM.getLangOpts().Optimize,
- Flags)
- : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags);
+ auto *D = ArgNo
+ ? DBuilder.createParameterVariable(
+ Scope, Name, *ArgNo, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags)
+ : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags,
+ Align);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -3307,9 +3486,10 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
}
// Create the descriptor for the variable.
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
auto *D = DBuilder.createAutoVariable(
cast<llvm::DILocalScope>(LexicalBlockStack.back()), VD->getName(), Unit,
- Line, Ty);
+ Line, Ty, false, llvm::DINode::FlagZero, Align);
// Insert an llvm.dbg.declare into the current block.
auto DL = llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back());
@@ -3438,17 +3618,19 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DIType *fieldType;
if (capture->isByRef()) {
TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
+ auto Align = PtrInfo.AlignIsRequired ? PtrInfo.Align : 0;
// FIXME: this creates a second copy of this type!
uint64_t xoffset;
fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
- fieldType =
- DBuilder.createMemberType(tunit, name, tunit, line, PtrInfo.Width,
- PtrInfo.Align, offsetInBits, 0, fieldType);
+ fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
+ PtrInfo.Width, Align, offsetInBits,
+ llvm::DINode::FlagZero, fieldType);
} else {
+ auto Align = getDeclAlignIfRequired(variable, CGM.getContext());
fieldType = createFieldType(name, variable->getType(), loc, AS_public,
- offsetInBits, tunit, tunit);
+ offsetInBits, Align, tunit, tunit);
}
fields.push_back(fieldType);
}
@@ -3459,14 +3641,14 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields);
- llvm::DIType *type = DBuilder.createStructType(
- tunit, typeName.str(), tunit, line,
- CGM.getContext().toBits(block.BlockSize),
- CGM.getContext().toBits(block.BlockAlign), 0, nullptr, fieldsArray);
+ llvm::DIType *type =
+ DBuilder.createStructType(tunit, typeName.str(), tunit, line,
+ CGM.getContext().toBits(block.BlockSize), 0,
+ llvm::DINode::FlagZero, nullptr, fieldsArray);
type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);
// Get overall information about the block.
- unsigned flags = llvm::DINode::FlagArtificial;
+ llvm::DINode::DIFlags flags = llvm::DINode::FlagArtificial;
auto *scope = cast<llvm::DILocalScope>(LexicalBlockStack.back());
// Create the descriptor for the parameter.
@@ -3505,10 +3687,10 @@ CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC));
}
-llvm::DIGlobalVariable *CGDebugInfo::CollectAnonRecordDecls(
+llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
const RecordDecl *RD, llvm::DIFile *Unit, unsigned LineNo,
StringRef LinkageName, llvm::GlobalVariable *Var, llvm::DIScope *DContext) {
- llvm::DIGlobalVariable *GV = nullptr;
+ llvm::DIGlobalVariableExpression *GVE = nullptr;
for (const auto *Field : RD->fields()) {
llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
@@ -3517,16 +3699,17 @@ llvm::DIGlobalVariable *CGDebugInfo::CollectAnonRecordDecls(
// Ignore unnamed fields, but recurse into anonymous records.
if (FieldName.empty()) {
if (const auto *RT = dyn_cast<RecordType>(Field->getType()))
- GV = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
+ GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
Var, DContext);
continue;
}
// Use VarDecl's Tag, Scope and Line number.
- GV = DBuilder.createGlobalVariable(DContext, FieldName, LinkageName, Unit,
- LineNo, FieldTy,
- Var->hasLocalLinkage(), Var, nullptr);
+ GVE = DBuilder.createGlobalVariableExpression(
+ DContext, FieldName, LinkageName, Unit, LineNo, FieldTy,
+ Var->hasLocalLinkage());
+ Var->addDebugInfo(GVE);
}
- return GV;
+ return GVE;
}
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
@@ -3534,6 +3717,14 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
if (D->hasAttr<NoDebugAttr>())
return;
+
+ // If we already created a DIGlobalVariable for this declaration, just attach
+ // it to the llvm::GlobalVariable.
+ auto Cached = DeclCache.find(D->getCanonicalDecl());
+ if (Cached != DeclCache.end())
+ return Var->addDebugInfo(
+ cast<llvm::DIGlobalVariableExpression>(Cached->second));
+
// Create global variable debug descriptor.
llvm::DIFile *Unit = nullptr;
llvm::DIScope *DContext = nullptr;
@@ -3544,7 +3735,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// Attempt to store one global variable for the declaration - even if we
// emit a lot of fields.
- llvm::DIGlobalVariable *GV = nullptr;
+ llvm::DIGlobalVariableExpression *GVE = nullptr;
// If this is an anonymous union then we'll want to emit a global
// variable for each member of the anonymous union so that it's possible
@@ -3553,21 +3744,23 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
assert(RD->isAnonymousStructOrUnion() &&
"unnamed non-anonymous struct or union?");
- GV = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
+ GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
} else {
- GV = DBuilder.createGlobalVariable(
+ auto Align = getDeclAlignIfRequired(D, CGM.getContext());
+ GVE = DBuilder.createGlobalVariableExpression(
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
- Var->hasLocalLinkage(), Var,
- getOrCreateStaticDataMemberDeclarationOrNull(D));
+ Var->hasLocalLinkage(), /*Expr=*/nullptr,
+ getOrCreateStaticDataMemberDeclarationOrNull(D), Align);
+ Var->addDebugInfo(GVE);
}
- DeclCache[D->getCanonicalDecl()].reset(GV);
+ DeclCache[D->getCanonicalDecl()].reset(GVE);
}
-void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
- llvm::Constant *Init) {
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
if (VD->hasAttr<NoDebugAttr>())
return;
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
// Create the descriptor for the variable.
llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
@@ -3604,9 +3797,20 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
auto &GV = DeclCache[VD];
if (GV)
return;
- GV.reset(DBuilder.createGlobalVariable(
+ llvm::DIExpression *InitExpr = nullptr;
+ if (CGM.getContext().getTypeSize(VD->getType()) <= 64) {
+ // FIXME: Add a representation for integer constants wider than 64 bits.
+ if (Init.isInt())
+ InitExpr =
+ DBuilder.createConstantValueExpression(Init.getInt().getExtValue());
+ else if (Init.isFloat())
+ InitExpr = DBuilder.createConstantValueExpression(
+ Init.getFloat().bitcastToAPInt().getZExtValue());
+ }
+ GV.reset(DBuilder.createGlobalVariableExpression(
DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
- true, Init, getOrCreateStaticDataMemberDeclarationOrNull(VarD)));
+ true, InitExpr, getOrCreateStaticDataMemberDeclarationOrNull(VarD),
+ Align));
}
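The float branch above relies on a bit-cast because the constant-value expression carries an integer payload. A runnable illustration, with made-up values, of what `bitcastToAPInt().getZExtValue()` yields for a float initializer:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  float Init = 1.5f;  // e.g. 'static const float kVal = 1.5f;'
  uint32_t Bits;
  std::memcpy(&Bits, &Init, sizeof(Bits));
  // Prints 0x3fc00000: the integer the debugger reinterprets as a float.
  std::printf("constant-value operand: 0x%x\n", Bits);
}
```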
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
@@ -3620,8 +3824,8 @@ void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
if (CGM.getCodeGenOpts().getDebugInfo() < codegenoptions::LimitedDebugInfo)
return;
const NamespaceDecl *NSDecl = UD.getNominatedNamespace();
- if (!NSDecl->isAnonymousNamespace() ||
- CGM.getCodeGenOpts().DebugExplicitImport) {
+ if (!NSDecl->isAnonymousNamespace() ||
+ CGM.getCodeGenOpts().DebugExplicitImport) {
DBuilder.createImportedModule(
getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
getOrCreateNameSpace(NSDecl),
@@ -3700,8 +3904,8 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
unsigned LineNo = getLineNumber(NSDecl->getLocation());
llvm::DIFile *FileD = getOrCreateFile(NSDecl->getLocation());
llvm::DIScope *Context = getDeclContextDescriptor(NSDecl);
- llvm::DINamespace *NS =
- DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
+ llvm::DINamespace *NS = DBuilder.createNameSpace(
+ Context, NSDecl->getName(), FileD, LineNo, NSDecl->isInline());
NameSpaceCache[NSDecl].reset(NS);
return NS;
}
@@ -3750,6 +3954,8 @@ void CGDebugInfo::finalize() {
else
Repl = it->second;
+ if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(Repl))
+ Repl = GVE->getVariable();
DBuilder.replaceTemporary(std::move(FwdDecl), cast<llvm::MDNode>(Repl));
}
@@ -3770,3 +3976,12 @@ void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
// Don't ignore in case of explicit cast where it is referenced indirectly.
DBuilder.retainType(DieTy);
}
+
+llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
+ if (LexicalBlockStack.empty())
+ return llvm::DebugLoc();
+
+ llvm::MDNode *Scope = LexicalBlockStack.back();
+ return llvm::DebugLoc::get(
+ getLineNumber(Loc), getColumnNumber(Loc), Scope);
+}
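A hypothetical call site for the new helper (assumed, not part of this patch): callers that previously built `llvm::DebugLoc`s by hand can route through CGDebugInfo and inherit its empty-stack fallback.

```cpp
#include "CGDebugInfo.h"       // internal clang/lib/CodeGen headers
#include "CodeGenFunction.h"

// Sketch: attach a source location to IR being emitted, if debug info is on.
void setLoc(clang::CodeGen::CodeGenFunction &CGF, clang::SourceLocation Loc) {
  if (clang::CodeGen::CGDebugInfo *DI = CGF.getDebugInfo())
    CGF.Builder.SetCurrentDebugLocation(DI->SourceLocToDebugLoc(Loc));
}
```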
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 366dd81ac812..ac2e8dd2e0a4 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -15,12 +15,14 @@
#define LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
#include "CGBuilder.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Type.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
@@ -32,7 +34,6 @@ class MDNode;
}
namespace clang {
-class CXXMethodDecl;
class ClassTemplateSpecializationDecl;
class GlobalDecl;
class ModuleMap;
@@ -67,6 +68,7 @@ class CGDebugInfo {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
llvm::DIType *SingletonId = nullptr;
#include "clang/Basic/OpenCLImageTypes.def"
+ llvm::DIType *OCLSamplerDITy = nullptr;
llvm::DIType *OCLEventDITy = nullptr;
llvm::DIType *OCLClkEventDITy = nullptr;
llvm::DIType *OCLQueueDITy = nullptr;
@@ -96,8 +98,7 @@ class CGDebugInfo {
/// List of interfaces we want to keep even if orphaned.
std::vector<void *> RetainedTypes;
- /// Cache of forward declared types to RAUW at the end of
- /// compilation.
+ /// Cache of forward declared types to RAUW at the end of compilation.
std::vector<std::pair<const TagType *, llvm::TrackingMDRef>> ReplaceMap;
/// Cache of replaceable forward declarations (functions and
@@ -155,6 +156,8 @@ class CGDebugInfo {
llvm::DIFile *F);
/// Get Objective-C object type.
llvm::DIType *CreateType(const ObjCObjectType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const ObjCTypeParamType *Ty, llvm::DIFile *Unit);
+
llvm::DIType *CreateType(const VectorType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const ArrayType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const LValueReferenceType *Ty, llvm::DIFile *F);
@@ -216,6 +219,15 @@ class CGDebugInfo {
SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DIType *RecordTy);
+ /// Helper function for CollectCXXBases.
+ /// Adds debug info entries for types in Bases that are not in SeenTypes.
+ void CollectCXXBasesAux(const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DIType *RecordTy,
+ const CXXRecordDecl::base_class_const_range &Bases,
+ llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
+ llvm::DINode::DIFlags StartingFlags);
+
/// A helper function to collect template parameters.
llvm::DINodeArray CollectTemplateParams(const TemplateParameterList *TPList,
ArrayRef<TemplateArgument> TAList,
@@ -233,9 +245,19 @@ class CGDebugInfo {
llvm::DIType *createFieldType(StringRef name, QualType type,
SourceLocation loc, AccessSpecifier AS,
+ uint64_t offsetInBits,
+ uint32_t AlignInBits,
+ llvm::DIFile *tunit, llvm::DIScope *scope,
+ const RecordDecl *RD = nullptr);
+
+ llvm::DIType *createFieldType(StringRef name, QualType type,
+ SourceLocation loc, AccessSpecifier AS,
uint64_t offsetInBits, llvm::DIFile *tunit,
llvm::DIScope *scope,
- const RecordDecl *RD = nullptr);
+ const RecordDecl *RD = nullptr) {
+ return createFieldType(name, type, loc, AS, offsetInBits, 0, tunit, scope,
+ RD);
+ }
/// Create new bit field member.
llvm::DIType *createBitFieldType(const FieldDecl *BitFieldDecl,
@@ -254,6 +276,8 @@ class CGDebugInfo {
llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType *RecordTy, const RecordDecl *RD);
+ void CollectRecordNestedRecord(const RecordDecl *RD,
+ SmallVectorImpl<llvm::Metadata *> &E);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DICompositeType *RecordTy);
@@ -261,7 +285,8 @@ class CGDebugInfo {
/// If the C++ class has vtable info then insert appropriate debug
/// info entry in EltTys vector.
void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile *F,
- SmallVectorImpl<llvm::Metadata *> &EltTys);
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DICompositeType *RecordTy);
/// @}
/// Create a new lexical block node and push it on the stack.
@@ -295,6 +320,9 @@ public:
/// ignored.
void setLocation(SourceLocation Loc);
+ /// Convert a SourceLocation to a DebugLoc.
+ llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Loc);
+
/// Emit metadata to indicate a change in line/column information in
/// the source file. If the location is invalid, the previous
/// location will be reused.
@@ -350,8 +378,8 @@ public:
/// Emit information about a global variable.
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
- /// Emit global variable's debug info.
- void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+ /// Emit a constant global variable's debug info.
+ void EmitGlobalVariable(const ValueDecl *VD, const APValue &Init);
/// Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
@@ -414,6 +442,10 @@ private:
/// Remap a given path with the current debug prefix map
std::string remapDIPath(StringRef) const;
+ /// Compute the file checksum debug info for the given file ID.
+ llvm::DIFile::ChecksumKind computeChecksum(FileID FID,
+ SmallString<32> &Checksum) const;
+
/// Get the file debug info descriptor for the input location.
llvm::DIFile *getOrCreateFile(SourceLocation Loc);
@@ -468,14 +500,14 @@ private:
llvm::DIGlobalVariable *
getGlobalVariableForwardDeclaration(const VarDecl *VD);
- /// \brief Return a global variable that represents one of the
- /// collection of global variables created for an anonmyous union.
+ /// Return a global variable that represents one of the collection of global
+ /// variables created for an anonymous union.
///
/// Recursively collect all of the member fields of a global
/// anonymous decl and create static variables for them. The first
/// time this is called it needs to be on a union and then from
/// there we can have additional unnamed fields.
- llvm::DIGlobalVariable *
+ llvm::DIGlobalVariableExpression *
CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile *Unit,
unsigned LineNo, StringRef LinkageName,
llvm::GlobalVariable *Var, llvm::DIScope *DContext);
@@ -514,7 +546,7 @@ private:
StringRef &Name, StringRef &LinkageName,
llvm::DIScope *&FDContext,
llvm::DINodeArray &TParamsArray,
- unsigned &Flags);
+ llvm::DINode::DIFlags &Flags);
/// Collect various properties of a VarDecl.
void collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 89407cd70c3d..d76136380160 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -13,6 +13,7 @@
#include "CodeGenFunction.h"
#include "CGBlocks.h"
+#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
@@ -77,6 +78,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::PragmaDetectMismatch:
case Decl::AccessSpec:
case Decl::LinkageSpec:
+ case Decl::Export:
case Decl::ObjCPropertyImpl:
case Decl::FileScopeAsm:
case Decl::Friend:
@@ -87,6 +89,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::UsingShadow:
case Decl::ConstructorUsingShadow:
case Decl::ObjCTypeParam:
+ case Decl::Binding:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
@@ -110,15 +113,25 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(D));
return;
+ case Decl::UsingPack:
+ for (auto *Using : cast<UsingPackDecl>(D).expansions())
+ EmitDecl(*Using);
+ return;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
return;
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
const VarDecl &VD = cast<VarDecl>(D);
assert(VD.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
- return EmitVarDecl(VD);
+ EmitVarDecl(VD);
+ if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
+ for (auto *B : DD->bindings())
+ if (auto *HD = B->getHoldingVar())
+ EmitVarDecl(*HD);
+ return;
}
case Decl::OMPDeclareReduction:
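A source-level example of what the new `Decl::Decomposition` path handles: for a tuple-like decomposition, each binding owns a holding variable (the stored result of `std::get`), and those are now emitted right after the decomposition variable itself.

```cpp
// Requires C++1z/C++17 structured bindings, new in this era of clang.
#include <tuple>

void f() {
  std::tuple<int, float> T{1, 2.0f};
  auto [A, B] = T;  // DecompositionDecl; A and B have holding variables
  (void)A;
  (void)B;
}
```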
@@ -526,7 +539,8 @@ namespace {
CallArgList Args;
Args.add(RValue::get(Arg),
CGF.getContext().getPointerType(Var.getType()));
- CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
+ auto Callee = CGCallee::forDirect(CleanupFn);
+ CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
}
};
} // end anonymous namespace
@@ -698,7 +712,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
}
auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
- llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+ llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
// If __weak, we want to use a barrier under certain conditions.
if (lifetime == Qualifiers::OCL_Weak)
@@ -765,37 +779,6 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
-/// EmitScalarInit - Initialize the given lvalue with the given object.
-void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
- Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
- if (!lifetime)
- return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
-
- switch (lifetime) {
- case Qualifiers::OCL_None:
- llvm_unreachable("present but none");
-
- case Qualifiers::OCL_ExplicitNone:
- // nothing to do
- break;
-
- case Qualifiers::OCL_Strong:
- init = EmitARCRetain(lvalue.getType(), init);
- break;
-
- case Qualifiers::OCL_Weak:
- // Initialize and then skip the primitive store.
- EmitARCInitWeak(lvalue.getAddress(), init);
- return;
-
- case Qualifiers::OCL_Autoreleasing:
- init = EmitARCRetainAutorelease(lvalue.getType(), init);
- break;
- }
-
- EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
-}
-
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
/// NumStores scalar stores.
@@ -907,29 +890,12 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
EmitAutoVarCleanups(emission);
}
-/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
-/// markers.
-static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
- const LangOptions &LangOpts) {
- // Asan uses markers for use-after-scope checks.
- if (CGOpts.SanitizeAddressUseAfterScope)
- return true;
-
- // Disable lifetime markers in msan builds.
- // FIXME: Remove this when msan works with lifetime markers.
- if (LangOpts.Sanitize.has(SanitizerKind::Memory))
- return false;
-
- // For now, only in optimized builds.
- return CGOpts.OptimizationLevel != 0;
-}
-
/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
llvm::Value *Addr) {
- if (!shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), getLangOpts()))
+ if (!ShouldEmitLifetimeMarkers)
return nullptr;
llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
@@ -986,8 +952,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// If the variable's a const type, and it's neither an NRVO
// candidate nor a __block variable and has no mutable members,
// emit it as a global instead.
- if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
- CGM.isTypeConstant(Ty, true)) {
+ // The exception is a variable located in a non-constant address space
+ // in OpenCL.
+ if ((!getLangOpts().OpenCL ||
+ Ty.getAddressSpace() == LangAS::opencl_constant) &&
+ (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true))) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
// Signal this condition to later callbacks.
@@ -1049,12 +1019,18 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
bool IsMSCatchParam =
D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
- // Emit a lifetime intrinsic if meaningful. There's no point
- // in doing this if we don't have a valid insertion point (?).
+ // Emit a lifetime intrinsic if meaningful. There's no point in doing this
+ // if we don't have a valid insertion point (?).
if (HaveInsertPoint() && !IsMSCatchParam) {
- uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
- emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(size, address.getPointer());
+ // goto or switch-case statements can split a lifetime into several
+ // regions, which takes extra effort to handle correctly (PR28267).
+ // This is a rare case, so it is better to omit the intrinsics than
+ // to have them placed incorrectly.
+ if (!Bypasses.IsBypassed(&D)) {
+ uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ emission.SizeForLifetimeMarkers =
+ EmitLifetimeStart(size, address.getPointer());
+ }
} else {
assert(!emission.useLifetimeMarkers());
}
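An illustrative function that trips the new `Bypasses` check (cf. PR28267): the jump enters `X`'s scope without passing its declaration, so an `llvm.lifetime.start` placed at the declaration would never execute on that path, and omitting the markers is the safe choice.

```cpp
void g(bool Cond) {
  if (Cond)
    goto skip;  // bypasses X's declaration
  {
    int X;      // no initializer, so the jump past it is legal C++
    X = 1;
    (void)X;
  skip:;
  }
}
```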
@@ -1257,10 +1233,16 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
// Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
std::string Name = getStaticDeclName(CGM, D);
+ unsigned AS = 0;
+ if (getLangOpts().OpenCL) {
+ AS = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
+ BP = llvm::PointerType::getInt8PtrTy(getLLVMContext(), AS);
+ }
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
- constant, Name);
+ constant, Name, nullptr,
+ llvm::GlobalValue::NotThreadLocal, AS);
GV->setAlignment(Loc.getAlignment().getQuantity());
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
@@ -1762,6 +1744,24 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
setBlockContextParameter(IPD, ArgNo, Arg.getDirectValue());
return;
}
+
+ // Apply any prologue 'this' adjustments required by the ABI. Be careful to
+ // handle the case where 'this' is passed indirectly as part of an inalloca
+ // struct.
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
+ if (MD->isVirtual() && IPD == CXXABIThisDecl) {
+ llvm::Value *This = Arg.isIndirect()
+ ? Builder.CreateLoad(Arg.getIndirectAddress())
+ : Arg.getDirectValue();
+ This = CGM.getCXXABI().adjustThisParameterInVirtualFunctionPrologue(
+ *this, CurGD, This);
+ if (Arg.isIndirect())
+ Builder.CreateStore(This, Arg.getIndirectAddress());
+ else
+ Arg = ParamValue::forDirect(This);
+ }
+ }
}
Address DeclPtr = Address::invalid();
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 89d142e44b49..8d9d0b21bfe1 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -121,13 +121,15 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *Addr) {
- // Don't emit the intrinsic if we're not optimizing.
+ // Do not emit the intrinsic if we're not optimizing.
if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
return;
// Grab the llvm.invariant.start intrinsic.
llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
- llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
+ // Overloaded address space type.
+ llvm::Type *ObjectPtr[1] = {CGF.Int8PtrTy};
+ llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID, ObjectPtr);
// Emit a call with the size in bytes of the object.
CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
@@ -235,7 +237,8 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::Constant *atexit =
- CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeSet(),
+ /*Local=*/true);
if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
atexitFn->setDoesNotThrow();
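Sketched in plain C++, the registered runtime call boils down to this: CodeGen synthesizes a nullary stub around the destructor call and passes it to `atexit`, whose `int (*)(void (*)(void))` signature matches the `atexitTy` built above.

```cpp
#include <cstdio>
#include <cstdlib>

static void dtorStub() { std::puts("global destroyed"); }  // the emitted stub

int main() {
  std::atexit(dtorStub);  // runtime effect of registerGlobalDtorWithAtExit
  return 0;               // dtorStub runs during normal exit
}
```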
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 4a7dc4205e09..7b7880e07a95 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -221,10 +221,9 @@ const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
- llvm::Constant *Fn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
- Personality.PersonalityFn);
- return Fn;
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
+ Personality.PersonalityFn,
+ llvm::AttributeSet(), /*Local=*/true);
}
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
@@ -698,6 +697,10 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
return nullptr;
}
+ // CUDA device code doesn't have exceptions.
+ if (LO.CUDA && LO.CUDAIsDevice)
+ return nullptr;
+
// Check the innermost scope for a cached landing pad. If this is
// a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
@@ -1429,7 +1432,8 @@ struct PerformSEHFinally final : EHScopeStack::Cleanup {
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, Args);
- CGF.EmitCall(FnInfo, OutlinedFinally, ReturnValueSlot(), Args);
+ auto Callee = CGCallee::forDirect(OutlinedFinally);
+ CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
}
};
} // end anonymous namespace
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 5f3b290d8eb1..183201c78e36 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/NSAPI.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
@@ -36,6 +37,8 @@
#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <string>
+
using namespace clang;
using namespace CodeGen;
@@ -607,7 +610,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(SizeTy, AlignVal),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- EmitCheck(Checks, "type_mismatch", StaticData, Ptr);
+ EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData, Ptr);
}
// If possible, check that the vptr indicates that there is a subobject of
@@ -675,7 +678,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
};
llvm::Value *DynamicData[] = { Ptr, Hash };
EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
- "dynamic_type_cache_miss", StaticData, DynamicData);
+ SanitizerHandler::DynamicTypeCacheMiss, StaticData,
+ DynamicData);
}
}
@@ -708,6 +712,8 @@ static bool isFlexibleArrayMemberExpr(const Expr *E) {
DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
return ++FI == FD->getParent()->field_end();
}
+ } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
+ return IRE->getDecl()->getNextIvar() == nullptr;
}
return false;
@@ -763,8 +769,8 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
};
llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
: Builder.CreateICmpULE(IndexVal, BoundVal);
- EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds",
- StaticData, Index);
+ EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
+ SanitizerHandler::OutOfBounds, StaticData, Index);
}
@@ -1180,10 +1186,10 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
// This should probably fire even for
if (isa<VarDecl>(value)) {
if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
- EmitDeclRefExprDbgValue(refExpr, C);
+ EmitDeclRefExprDbgValue(refExpr, result.Val);
} else {
assert(isa<EnumConstantDecl>(value));
- EmitDeclRefExprDbgValue(refExpr, C);
+ EmitDeclRefExprDbgValue(refExpr, result.Val);
}
// If we emitted a reference constant, we need to dereference that.
@@ -1217,11 +1223,10 @@ static bool hasBooleanRepresentation(QualType Ty) {
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::APInt &Min, llvm::APInt &End,
- bool StrictEnums) {
+ bool StrictEnums, bool IsBool) {
const EnumType *ET = Ty->getAs<EnumType>();
bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
ET && !ET->getDecl()->isFixed();
- bool IsBool = hasBooleanRepresentation(Ty);
if (!IsBool && !IsRegularCPlusPlusEnum)
return false;
@@ -1251,8 +1256,8 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
llvm::APInt Min, End;
- if (!getRangeForType(*this, Ty, Min, End,
- CGM.getCodeGenOpts().StrictEnums))
+ if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
+ hasBooleanRepresentation(Ty)))
return nullptr;
llvm::MDBuilder MDHelper(getLLVMContext());
@@ -1311,14 +1316,15 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
false /*ConvertTypeToTag*/);
}
- bool NeedsBoolCheck =
- SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty);
+ bool IsBool = hasBooleanRepresentation(Ty) ||
+ NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
+ bool NeedsBoolCheck = SanOpts.has(SanitizerKind::Bool) && IsBool;
bool NeedsEnumCheck =
SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
if (NeedsBoolCheck || NeedsEnumCheck) {
SanitizerScope SanScope(this);
llvm::APInt Min, End;
- if (getRangeForType(*this, Ty, Min, End, true)) {
+ if (getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) {
--End;
llvm::Value *Check;
if (!Min)
@@ -1336,8 +1342,8 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
EmitCheckTypeDescriptor(Ty)
};
SanitizerMask Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
- EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs,
- EmitCheckValue(Load));
+ EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
+ StaticArgs, EmitCheckValue(Load));
}
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
@@ -1627,11 +1633,19 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
break;
case Qualifiers::OCL_Strong:
+ if (isInit) {
+ Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
+ break;
+ }
EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
return;
case Qualifiers::OCL_Weak:
- EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ if (isInit)
+ // Initialize and then skip the primitive store.
+ EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
+ else
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
return;
case Qualifiers::OCL_Autoreleasing:
@@ -2015,9 +2029,14 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
return LV;
}
-static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
- const Expr *E, const FunctionDecl *FD) {
- llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
+static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
+ const FunctionDecl *FD) {
+ if (FD->hasAttr<WeakRefAttr>()) {
+ ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
+ return aliasee.getPointer();
+ }
+
+ llvm::Constant *V = CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>()) {
@@ -2025,11 +2044,18 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
// isn't the same as the type of a use. Correct for this with a
// bitcast.
QualType NoProtoType =
- CGF.getContext().getFunctionNoProtoType(Proto->getReturnType());
- NoProtoType = CGF.getContext().getPointerType(NoProtoType);
- V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
+ CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
+ NoProtoType = CGM.getContext().getPointerType(NoProtoType);
+ V = llvm::ConstantExpr::getBitCast(V,
+ CGM.getTypes().ConvertType(NoProtoType));
}
}
+ return V;
+}
+
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const FunctionDecl *FD) {
+ llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD);
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl);
}
@@ -2205,6 +2231,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
return EmitFunctionDeclLValue(*this, E, FD);
+ // FIXME: While we're emitting a binding from an enclosing scope, all other
+ // DeclRefExprs we see should be implicitly treated as if they also refer to
+ // an enclosing scope.
+ if (const auto *BD = dyn_cast<BindingDecl>(ND))
+ return EmitLValue(BD->getBinding());
+
llvm_unreachable("Unhandled DeclRefExpr");
}
@@ -2291,9 +2323,19 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
StringRef NameItems[] = {
PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
- if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) {
- auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
- return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+ if (auto *BD = dyn_cast<BlockDecl>(CurCodeDecl)) {
+ std::string Name = SL->getString();
+ if (!Name.empty()) {
+ unsigned Discriminator =
+ CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
+ if (Discriminator)
+ Name += "_" + Twine(Discriminator + 1).str();
+ auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+ } else {
+ auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+ }
}
auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
@@ -2461,17 +2503,35 @@ static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
}
}
+namespace {
+struct SanitizerHandlerInfo {
+ char const *const Name;
+ unsigned Version;
+};
+}
+
+const SanitizerHandlerInfo SanitizerHandlers[] = {
+#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
+ LIST_SANITIZER_CHECKS
+#undef SANITIZER_CHECK
+};
+
static void emitCheckHandlerCall(CodeGenFunction &CGF,
llvm::FunctionType *FnType,
ArrayRef<llvm::Value *> FnArgs,
- StringRef CheckName,
+ SanitizerHandler CheckHandler,
CheckRecoverableKind RecoverKind, bool IsFatal,
llvm::BasicBlock *ContBB) {
assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
bool NeedsAbortSuffix =
IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
- std::string FnName = ("__ubsan_handle_" + CheckName +
- (NeedsAbortSuffix ? "_abort" : "")).str();
+ const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
+ const StringRef CheckName = CheckInfo.Name;
+ std::string FnName =
+ ("__ubsan_handle_" + CheckName +
+ (CheckInfo.Version ? "_v" + llvm::utostr(CheckInfo.Version) : "") +
+ (NeedsAbortSuffix ? "_abort" : ""))
+ .str();
bool MayReturn =
!IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
@@ -2485,7 +2545,8 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
FnType, FnName,
llvm::AttributeSet::get(CGF.getLLVMContext(),
- llvm::AttributeSet::FunctionIndex, B));
+ llvm::AttributeSet::FunctionIndex, B),
+ /*Local=*/true);
llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
if (!MayReturn) {
HandlerCall->setDoesNotReturn();
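The table is generated from the same SANITIZER_CHECK X-macro that defines the SanitizerHandler enum (LIST_SANITIZER_CHECKS lives in CodeGenFunction.h), so each enum value indexes its own {name, version} row, and a versioned fatal check gets the runtime symbol __ubsan_handle_<name>_v<N>_abort. The shape of the pattern, with hypothetical entries and versions:

// Hypothetical illustration; the real list and version numbers differ.
// #define LIST_SANITIZER_CHECKS \
//   SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
//   SANITIZER_CHECK(TypeMismatch, type_mismatch, 1)
//
// #define SANITIZER_CHECK(Enum, Name, Version) Enum,
// enum SanitizerHandler { LIST_SANITIZER_CHECKS };  // same order as the
// #undef SANITIZER_CHECK                            // table built above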
@@ -2497,10 +2558,13 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
void CodeGenFunction::EmitCheck(
ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
- StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs) {
assert(IsSanitizerScope);
assert(Checked.size() > 0);
+ assert(CheckHandler >= 0 &&
+ CheckHandler < sizeof(SanitizerHandlers) / sizeof(*SanitizerHandlers));
+ const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
llvm::Value *FatalCond = nullptr;
llvm::Value *RecoverableCond = nullptr;
@@ -2580,7 +2644,7 @@ void CodeGenFunction::EmitCheck(
if (!FatalCond || !RecoverableCond) {
// Simple case: we need to generate a single handler call, either
// fatal, or non-fatal.
- emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind,
+ emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
(FatalCond != nullptr), Cont);
} else {
// Emit two handler calls: first one for set of unrecoverable checks,
@@ -2590,10 +2654,10 @@ void CodeGenFunction::EmitCheck(
llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
EmitBlock(FatalHandlerBB);
- emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true,
+ emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
NonFatalHandlerBB);
EmitBlock(NonFatalHandlerBB);
- emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false,
+ emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
Cont);
}
@@ -2718,7 +2782,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
llvm::Value *Cond =
Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
if (CGM.getLangOpts().Sanitize.has(Mask))
- EmitCheck(std::make_pair(Cond, Mask), "cfi_check_fail", {},
+ EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
{Data, Addr, ValidVtable});
else
EmitTrapCheck(Cond);
@@ -2753,10 +2817,11 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
- if (!CGM.getCodeGenOpts().TrapFuncName.empty())
- TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex,
- "trap-func-name",
- CGM.getCodeGenOpts().TrapFuncName);
+ if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
+ auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
+ CGM.getCodeGenOpts().TrapFuncName);
+ TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex, A);
+ }
return TrapCall;
}
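Same behavior as before, just built through llvm::Attribute::get now that the string-based addAttribute overload is unavailable. The observable effect, assuming the -ftrap-function=my_trap driver flag:

// call void @llvm.trap() #0
// attributes #0 = { "trap-func-name"="my_trap" }
// The backend then emits a call to my_trap() in place of a trap instruction.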
@@ -2869,13 +2934,30 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed) {
- // The index must always be an integer, which is not an aggregate. Emit it.
- llvm::Value *Idx = EmitScalarExpr(E->getIdx());
- QualType IdxTy = E->getIdx()->getType();
- bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
+ // The index must always be an integer, which is not an aggregate. Emit it
+ // in lexical order (this complexity is, sadly, required by C++17).
+ llvm::Value *IdxPre =
+ (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
+ auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
+ auto *Idx = IdxPre;
+ if (E->getLHS() != E->getIdx()) {
+ assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
+ Idx = EmitScalarExpr(E->getIdx());
+ }
+
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
- if (SanOpts.has(SanitizerKind::ArrayBounds))
- EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
+ EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
+
+ // Extend or truncate the index type to 32 or 64-bits.
+ if (Promote && Idx->getType() != IntPtrTy)
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
+
+ return Idx;
+ };
+ IdxPre = nullptr;
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
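Each branch below now emits its base first and only then invokes EmitIdxAfterBase, giving lexical-order evaluation; the eager IdxPre path covers the commutative spelling where the index is written first. Illustrative:

// a[i++] = 0;    // base 'a' written first: index emitted after the base
// (i++)[a] = 0;  // index written first (E->getLHS() == E->getIdx()):
//                // IdxPre emitted it eagerly; the lambda just reuses it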
@@ -2883,6 +2965,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
!isa<ExtVectorElementExpr>(E->getBase())) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
+ auto *Idx = EmitIdxAfterBase(/*Promote*/false);
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
E->getBase()->getType(),
@@ -2891,13 +2974,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// All the other cases basically behave like simple offsetting.
- // Extend or truncate the index type to 32 or 64-bits.
- if (Idx->getType() != IntPtrTy)
- Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
-
// Handle the extvector case we ignored above.
if (isa<ExtVectorElementExpr>(E->getBase())) {
LValue LV = EmitLValue(E->getBase());
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Address Addr = EmitExtVectorElementLValue(LV);
QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
@@ -2913,6 +2993,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// The element count here is the total number of non-VLA elements.
llvm::Value *numElements = getVLASize(vla).first;
@@ -2932,14 +3013,16 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
- CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
- llvm::Value *InterfaceSizeVal =
- llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());;
-
- llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
// Emit the base pointer.
Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
+
+ CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
+ llvm::Value *InterfaceSizeVal =
+ llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
+
+ llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
// We don't necessarily build correct LLVM struct types for ObjC
// interfaces, so we can't rely on GEP to do this scaling
@@ -2971,6 +3054,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
else
ArrayLV = EmitLValue(Array);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// Propagate the alignment from the array itself to the result.
Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
@@ -2981,6 +3065,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
} else {
// The base must be a pointer; emit it with an estimate of its alignment.
Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
!getLangOpts().isSignedOverflowDefined());
}
@@ -3463,7 +3548,7 @@ LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
return EmitAggExprToLValue(E);
// An lvalue initializer list must be initializing a reference.
- assert(E->getNumInits() == 1 && "reference init with multiple values");
+ assert(E->isTransparent() && "non-transparent glvalue init list");
return EmitLValue(E->getInit(0));
}
@@ -3602,6 +3687,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
@@ -3695,6 +3781,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
}
+ case CK_ZeroToOCLQueue:
+ llvm_unreachable("NULL to OpenCL queue lvalue cast is not valid");
case CK_ZeroToOCLEvent:
llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
}
@@ -3743,70 +3831,86 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
return EmitCUDAKernelCallExpr(CE, ReturnValue);
- const Decl *TargetDecl = E->getCalleeDecl();
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
- if (unsigned builtinID = FD->getBuiltinID())
- return EmitBuiltinExpr(FD, builtinID, E, ReturnValue);
- }
-
if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
- if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
- if (const auto *PseudoDtor =
- dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
- QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
- // Automatic Reference Counting:
- // If the pseudo-expression names a retainable object with weak or
- // strong lifetime, the object shall be released.
- Expr *BaseExpr = PseudoDtor->getBase();
- Address BaseValue = Address::invalid();
- Qualifiers BaseQuals;
-
- // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
- if (PseudoDtor->isArrow()) {
- BaseValue = EmitPointerWithAlignment(BaseExpr);
- const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
- BaseQuals = PTy->getPointeeType().getQualifiers();
- } else {
- LValue BaseLV = EmitLValue(BaseExpr);
- BaseValue = BaseLV.getAddress();
- QualType BaseTy = BaseExpr->getType();
- BaseQuals = BaseTy.getQualifiers();
- }
+ CGCallee callee = EmitCallee(E->getCallee());
- switch (DestroyedType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
+ if (callee.isBuiltin()) {
+ return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
+ E, ReturnValue);
+ }
- case Qualifiers::OCL_Strong:
- EmitARCRelease(Builder.CreateLoad(BaseValue,
- PseudoDtor->getDestroyedType().isVolatileQualified()),
- ARCPreciseLifetime);
- break;
+ if (callee.isPseudoDestructor()) {
+ return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
+ }
- case Qualifiers::OCL_Weak:
- EmitARCDestroyWeak(BaseValue);
- break;
- }
- } else {
- // C++ [expr.pseudo]p1:
- // The result shall only be used as the operand for the function call
- // operator (), and the result of such a call has type void. The only
- // effect is the evaluation of the postfix-expression before the dot or
- // arrow.
- EmitScalarExpr(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
+}
+
+/// Emit a CallExpr without considering whether it might be a subclass.
+RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ CGCallee Callee = EmitCallee(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
+}
+
+static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
+ if (auto builtinID = FD->getBuiltinID()) {
+ return CGCallee::forBuiltin(builtinID, FD);
+ }
+
+ llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
+ return CGCallee::forDirect(calleePtr, FD);
+}
+
+CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
+ E = E->IgnoreParens();
+
+ // Look through function-to-pointer decay.
+ if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
+ ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
+ return EmitCallee(ICE->getSubExpr());
}
- return RValue::get(nullptr);
+ // Resolve direct calls.
+ } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
+ return EmitDirectCallee(*this, FD);
+ }
+ } else if (auto ME = dyn_cast<MemberExpr>(E)) {
+ if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
+ EmitIgnoredExpr(ME->getBase());
+ return EmitDirectCallee(*this, FD);
+ }
+
+ // Look through template substitutions.
+ } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ return EmitCallee(NTTP->getReplacement());
+
+ // Treat pseudo-destructor calls differently.
+ } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
+ return CGCallee::forPseudoDestructor(PDE);
}
- llvm::Value *Callee = EmitScalarExpr(E->getCallee());
- return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
- TargetDecl);
+ // Otherwise, we have an indirect reference.
+ llvm::Value *calleePtr;
+ QualType functionType;
+ if (auto ptrType = E->getType()->getAs<PointerType>()) {
+ calleePtr = EmitScalarExpr(E);
+ functionType = ptrType->getPointeeType();
+ } else {
+ functionType = E->getType();
+ calleePtr = EmitLValue(E).getPointer();
+ }
+ assert(functionType->isFunctionType());
+ CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(),
+ E->getReferencedDeclOfCallee());
+ CGCallee callee(calleeInfo, calleePtr);
+ return callee;
}
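EmitCallee centralizes the callee classification that EmitCallExpr previously did inline. An abridged sketch of the CGCallee surface this code relies on (the real class lives in CGCall.h; this is not its full definition):

// class CGCallee {
// public:
//   static CGCallee forBuiltin(unsigned BuiltinID, const FunctionDecl *FD);
//   static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E);
//   static CGCallee forDirect(llvm::Constant *Fn, const Decl *D = nullptr);
//   bool isBuiltin() const;
//   bool isPseudoDestructor() const;
//   llvm::Value *getFunctionPointer() const;
//   CGCalleeInfo getAbstractInfo() const;
// };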
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
@@ -3982,22 +4086,15 @@ LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
AlignmentSource::Decl);
}
-RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
const CallExpr *E, ReturnValueSlot ReturnValue,
- CGCalleeInfo CalleeInfo, llvm::Value *Chain) {
+ llvm::Value *Chain) {
// Get the actual function type. The callee type will always be a pointer to
// function type or a block pointer type.
assert(CalleeType->isFunctionPointerType() &&
"Call must have function pointer type!");
- // Preserve the non-canonical function type because things like exception
- // specifications disappear in the canonical type. That information is useful
- // to drive the generation of more accurate code for this call later on.
- const FunctionProtoType *NonCanonicalFTP = CalleeType->getAs<PointerType>()
- ->getPointeeType()
- ->getAs<FunctionProtoType>();
-
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+ const Decl *TargetDecl = OrigCallee.getAbstractInfo().getCalleeDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
// We can only guarantee that a function is called from the correct
@@ -4015,6 +4112,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
const auto *FnType =
cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+ CGCallee Callee = OrigCallee;
+
if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
(!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
if (llvm::Constant *PrefixSig =
@@ -4029,8 +4128,10 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
llvm::StructType *PrefixStructTy = llvm::StructType::get(
CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+
llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
- Callee, llvm::PointerType::getUnqual(PrefixStructTy));
+ CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
llvm::Value *CalleeSigPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
llvm::Value *CalleeSig =
@@ -4053,7 +4154,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
EmitCheckTypeDescriptor(CalleeType)
};
EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
- "function_type_mismatch", StaticData, Callee);
+ SanitizerHandler::FunctionTypeMismatch, StaticData, CalleePtr);
Builder.CreateBr(Cont);
EmitBlock(Cont);
@@ -4070,7 +4171,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
- llvm::Value *CastedCallee = Builder.CreateBitCast(Callee, Int8PtrTy);
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+ llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
llvm::Value *TypeTest = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});
@@ -4085,7 +4187,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
CastedCallee, StaticData);
} else {
EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
- "cfi_check_fail", StaticData,
+ SanitizerHandler::CFICheckFail, StaticData,
{CastedCallee, llvm::UndefValue::get(IntPtrTy)});
}
}
@@ -4094,8 +4196,35 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
if (Chain)
Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
CGM.getContext().VoidPtrTy);
+
+ // C++17 requires that we evaluate arguments to a call using assignment syntax
+ // right-to-left, and that we evaluate arguments to certain other operators
+ // left-to-right. Note that we allow this to override the order dictated by
+ // the calling convention on the MS ABI, which means that parameter
+ // destruction order is not necessarily reverse construction order.
+ // FIXME: Revisit this based on C++ committee response to unimplementability.
+ EvaluationOrder Order = EvaluationOrder::Default;
+ if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (OCE->isAssignmentOp())
+ Order = EvaluationOrder::ForceRightToLeft;
+ else {
+ switch (OCE->getOperator()) {
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ case OO_Comma:
+ case OO_ArrowStar:
+ Order = EvaluationOrder::ForceLeftToRight;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
- E->getDirectCallee(), /*ParamsToSkip*/ 0);
+ E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
Args, FnType, /*isChainCall=*/Chain);
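A concrete reading of the forced orders chosen above (illustrative):

// a = f();            // overloaded '=': ForceRightToLeft, f() before 'a'
// out << f() << g();  // listed operator: ForceLeftToRight, f() before g()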
@@ -4123,11 +4252,13 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
if (isa<FunctionNoProtoType>(FnType) || Chain) {
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
- Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
+
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+ CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
+ Callee.setFunctionPointer(CalleePtr);
}
- return EmitCall(FnInfo, Callee, ReturnValue, Args,
- CGCalleeInfo(NonCanonicalFTP, TargetDecl));
+ return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
LValue CodeGenFunction::
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 6d18843591f3..009244784e50 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -164,6 +164,8 @@ public:
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
+ void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
+ llvm::Value *outerBegin = nullptr);
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
@@ -749,7 +751,9 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -1049,7 +1053,8 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
return true;
// (int*)0 - Null pointer expressions.
if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
- return ICE->getCastKind() == CK_NullToPointer;
+ return ICE->getCastKind() == CK_NullToPointer &&
+ CGF.getTypes().isPointerZeroInitializable(E->getType());
// '\0'
if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
return CL->getValue() == 0;
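The added isPointerZeroInitializable guard matters on targets where a null pointer is not the all-zero bit pattern (some GPU address spaces work this way); such an initializer can no longer be skipped on the assumption that zeroed memory already holds it. A hypothetical member that the guard excludes from the fast path:

struct S {
  __attribute__((address_space(3))) int *p;  // null may not be bit-pattern 0
  int x;
};
// An initializer { nullptr, 0 } for S must store p's null representation
// explicitly rather than relying on a surrounding memset-to-zero.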
@@ -1144,15 +1149,15 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ if (E->isTransparent())
+ return Visit(E->getInit(0));
+
AggValueSlot Dest = EnsureSlot(E->getType());
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- if (E->isStringLiteralInit())
- return Visit(E->getInit(0));
-
QualType elementType =
CGF.getContext().getAsArrayType(E->getType())->getElementType();
@@ -1161,16 +1166,6 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
return;
}
- if (E->getType()->isAtomicType()) {
- // An _Atomic(T) object can be list-initialized from an expression
- // of the same type.
- assert(E->getNumInits() == 1 &&
- CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
- E->getType()) &&
- "unexpected list initialization for atomic object");
- return Visit(E->getInit(0));
- }
-
assert(E->getType()->isRecordType() && "Only support structs/unions here!");
// Do struct initialization; this code just sets each individual member
@@ -1316,6 +1311,98 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
cleanupDominator->eraseFromParent();
}
+void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
+ llvm::Value *outerBegin) {
+ // Emit the common subexpression.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
+
+ Address destPtr = EnsureSlot(E->getType()).getAddress();
+ uint64_t numElements = E->getArraySize().getZExtValue();
+
+ if (!numElements)
+ return;
+
+ // destPtr is an array*. Construct an elementType* by drilling down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = {zero, zero};
+ llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
+ "arrayinit.begin");
+
+ // Prepare to special-case multidimensional array initialization: we avoid
+ // emitting multiple destructor loops in that case.
+ if (!outerBegin)
+ outerBegin = begin;
+ ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
+
+ QualType elementType =
+ CGF.getContext().getAsArrayType(E->getType())->getElementType();
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ destPtr.getAlignment().alignmentOfArrayElement(elementSize);
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *index =
+ Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
+ index->addIncoming(zero, entryBB);
+ llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);
+
+ // Prepare for a cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ EHScopeStack::stable_iterator cleanup;
+ if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
+ if (outerBegin->getType() != element->getType())
+ outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
+ CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
+ elementAlign,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ // Emit the actual filler expression.
+ {
+ // Temporaries created in an array initialization loop are destroyed
+ // at the end of each iteration.
+ CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
+ CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
+
+ if (InnerLoop) {
+ // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
+ auto elementSlot = AggValueSlot::forLValue(
+ elementLV, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased);
+ AggExprEmitter(CGF, elementSlot, false)
+ .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
+ } else
+ EmitInitializationToLValue(E->getSubExpr(), elementLV);
+ }
+
+ // Move on to the next element.
+ llvm::Value *nextIndex = Builder.CreateNUWAdd(
+ index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
+ index->addIncoming(nextIndex, Builder.GetInsertBlock());
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(
+ nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+
+ CGF.EmitBlock(endBB);
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind)
+ CGF.DeactivateCleanupBlock(cleanup, index);
+}
+
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
AggValueSlot Dest = EnsureSlot(E->getType());
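ArrayInitLoopExpr nodes arise when an array is copied element-by-element as part of an initialization, e.g. a lambda's by-value capture of an array; the loop added above lowers that with an index PHI and one shared cleanup across nested dimensions. A minimal trigger:

void demo() {
  int arr[4] = {1, 2, 3, 4};
  auto f = [arr] { return arr[0]; };  // by-value capture copies 'arr'
  (void)f;                            // element-wise, via the loop above
}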
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index eec2aceb88a2..71c8fb8b7ae3 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -28,25 +28,18 @@ static RequiredArgs
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *CE,
- CallArgList &Args) {
+ CallArgList &Args, CallArgList *RtlArgs) {
assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
"Trying to emit a member or operator call expr on a static method!");
-
- // C++11 [class.mfct.non-static]p2:
- // If a non-static member function of a class X is called for an object that
- // is not of type X, or of a type derived from X, the behavior is undefined.
- SourceLocation CallLoc;
- if (CE)
- CallLoc = CE->getExprLoc();
- CGF.EmitTypeCheck(
- isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
- : CodeGenFunction::TCK_MemberCall,
- CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
+ ASTContext &C = CGF.getContext();
// Push the this ptr.
- Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
+ const CXXRecordDecl *RD =
+ CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
+ Args.add(RValue::get(This),
+ RD ? C.getPointerType(C.getTypeDeclType(RD)) : C.VoidPtrTy);
// If there is an implicit parameter (e.g. VTT), emit it.
if (ImplicitParam) {
@@ -57,7 +50,12 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
// And the rest of the call args.
- if (CE) {
+ if (RtlArgs) {
+ // Special case: if the caller emitted the arguments right-to-left already
+ // (prior to emitting the *this argument), we're done. This happens for
+ // assignment operators.
+ Args.addFrom(*RtlArgs);
+ } else if (CE) {
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
@@ -71,26 +69,78 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
- const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ const CXXMethodDecl *MD, const CGCallee &Callee,
+ ReturnValueSlot ReturnValue,
llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
- const CallExpr *CE) {
+ const CallExpr *CE, CallArgList *RtlArgs) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
CallArgList Args;
RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
- *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args);
- return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
- Callee, ReturnValue, Args, MD);
+ *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
+ auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
RValue CodeGenFunction::EmitCXXDestructorCall(
- const CXXDestructorDecl *DD, llvm::Value *Callee, llvm::Value *This,
+ const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This,
llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
StructorType Type) {
CallArgList Args;
commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
- ImplicitParamTy, CE, Args);
+ ImplicitParamTy, CE, Args, nullptr);
return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
- Callee, ReturnValueSlot(), Args, DD);
+ Callee, ReturnValueSlot(), Args);
+}
+
+RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
+ const CXXPseudoDestructorExpr *E) {
+ QualType DestroyedType = E->getDestroyedType();
+ if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = E->getBase();
+ Address BaseValue = Address::invalid();
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitPointerWithAlignment(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (DestroyedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ DestroyedType.isVolatileQualified()),
+ ARCPreciseLifetime);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitIgnoredExpr(E->getBase());
+ }
+
+ return RValue::get(nullptr);
}
static CXXRecordDecl *getCXXRecord(const Expr *E) {
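The pseudo-destructor handling that used to sit inline in EmitCallExpr now has the dedicated entry point above. The construct in question, as a sketch:

template <typename T> void destroy(T *p) { p->~T(); }
// For scalar T this only evaluates 'p' (C++ [expr.pseudo]p1); under ARC a
// __strong or __weak pointee is additionally released, as handled above.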
@@ -115,8 +165,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
- llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
- return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
+ CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
ReturnValue);
}
@@ -166,6 +216,19 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
}
}
+ // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
+ // operator before the LHS.
+ CallArgList RtlArgStorage;
+ CallArgList *RtlArgs = nullptr;
+ if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ if (OCE->isAssignmentOp()) {
+ RtlArgs = &RtlArgStorage;
+ EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
+ drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
+ /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
+ }
+ }
+
Address This = Address::invalid();
if (IsArrow)
This = EmitPointerWithAlignment(Base);
@@ -183,10 +246,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
// We don't like to generate the trivial copy/move assignment operator
// when it isn't necessary; just produce the proper effect here.
- // Special case: skip first argument of CXXOperatorCall (it is "this").
- unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
- Address RHS = EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
- EmitAggregateAssign(This, RHS, CE->getType());
+ LValue RHS = isa<CXXOperatorCallExpr>(CE)
+ ? MakeNaturalAlignAddrLValue(
+ (*RtlArgs)[0].RV.getScalarVal(),
+ (*(CE->arg_begin() + 1))->getType())
+ : EmitLValue(*CE->arg_begin());
+ EmitAggregateAssign(This, RHS.getAddress(), CE->getType());
return RValue::get(This.getPointer());
}
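Consuming the pre-emitted RtlArgs here preserves C++17's guarantee that the right operand of an assignment is evaluated before the left, even when the trivial operator= collapses into an aggregate copy. Illustrative:

struct Trivial { int x; };
Trivial &lhs();
Trivial rhs();
void test() { lhs() = rhs(); }  // C++17: rhs() is called before lhs()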
@@ -217,6 +282,22 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
+ // C++11 [class.mfct.non-static]p2:
+ // If a non-static member function of a class X is called for an object that
+ // is not of type X, or of a type derived from X, the behavior is undefined.
+ SourceLocation CallLoc;
+ ASTContext &C = getContext();
+ if (CE)
+ CallLoc = CE->getExprLoc();
+
+ EmitTypeCheck(isa<CXXConstructorDecl>(CalleeDecl)
+ ? CodeGenFunction::TCK_ConstructorCall
+ : CodeGenFunction::TCK_MemberCall,
+ CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()));
+
+ // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
+ // 'CalleeDecl' instead.
+
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
// virtual call mechanism.
@@ -224,8 +305,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
- llvm::Value *Callee;
-
+
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
assert(CE->arg_begin() == CE->arg_end() &&
"Destructor shouldn't have explicit parameters");
@@ -234,24 +314,32 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGM.getCXXABI().EmitVirtualDestructorCall(
*this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
} else {
+ CGCallee Callee;
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee =
- CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
+ Callee = CGCallee::forDirect(
+ CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
+ Dtor);
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
- Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
+ DDtor);
}
- EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ EmitCXXMemberOrOperatorCall(
+ CalleeDecl, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE, nullptr);
}
return RValue::get(nullptr);
}
+ CGCallee Callee;
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
+ Ctor);
} else if (UseVirtualCall) {
Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
CE->getLocStart());
@@ -266,9 +354,11 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGM.GetAddrOfFunction(MD, Ty);
+ Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
else {
- Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
+ DevirtualizedMethod);
}
}
@@ -277,8 +367,9 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
*this, CalleeDecl, This, UseVirtualCall);
}
- return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ return EmitCXXMemberOrOperatorCall(
+ CalleeDecl, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}
RValue
@@ -297,9 +388,6 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- // Get the member function pointer.
- llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
-
// Emit the 'this' pointer.
Address This = Address::invalid();
if (BO->getOpcode() == BO_PtrMemI)
@@ -310,9 +398,12 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
QualType(MPT->getClass(), 0));
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
+
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *ThisPtrForCall = nullptr;
- llvm::Value *Callee =
+ CGCallee Callee =
CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
ThisPtrForCall, MemFnPtr, MPT);
@@ -851,8 +942,68 @@ void CodeGenFunction::EmitNewArrayInitializer(
CharUnits ElementAlign =
BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+ // Attempt to perform zero-initialization using memset.
+ auto TryMemsetInitialization = [&]() -> bool {
+ // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
+ // we can initialize with a memset to -1.
+ if (!CGM.getTypes().isZeroInitializable(ElementType))
+ return false;
+
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+
+ // Subtract out the size of any elements we've already initialized.
+ auto *RemainingSize = AllocSizeWithoutCookie;
+ if (InitListElements) {
+ // We know this can't overflow; we check this when doing the allocation.
+ auto *InitializedSize = llvm::ConstantInt::get(
+ RemainingSize->getType(),
+ getContext().getTypeSizeInChars(ElementType).getQuantity() *
+ InitListElements);
+ RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
+ }
+
+ // Create the memset.
+ Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
+ return true;
+ };
+
// If the initializer is an initializer list, first do the explicit elements.
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ // Initializing from a (braced) string literal is a special case; the init
+ // list element does not initialize a (single) array element.
+ if (ILE->isStringLiteralInit()) {
+ // Initialize the initial portion of length equal to that of the string
+ // literal. The allocation must be for at least this much; we emitted a
+ // check for that earlier.
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(ILE->getInit(0), Slot);
+
+ // Move past these elements.
+ InitListElements =
+ cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
+ ->getSize().getZExtValue();
+ CurPtr =
+ Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ Builder.getSize(InitListElements),
+ "string.init.end"),
+ CurPtr.getAlignment().alignmentAtOffset(InitListElements *
+ ElementSize));
+
+ // Zero out the rest, if any remain.
+ llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
+ if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
+ bool OK = TryMemsetInitialization();
+ (void)OK;
+ assert(OK && "couldn't memset character type?");
+ }
+ return;
+ }
+
InitListElements = ILE->getNumInits();
// If this is a multi-dimensional array new, we will initialize multiple
@@ -919,32 +1070,6 @@ void CodeGenFunction::EmitNewArrayInitializer(
CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
}
- // Attempt to perform zero-initialization using memset.
- auto TryMemsetInitialization = [&]() -> bool {
- // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
- // we can initialize with a memset to -1.
- if (!CGM.getTypes().isZeroInitializable(ElementType))
- return false;
-
- // Optimization: since zero initialization will just set the memory
- // to all zeroes, generate a single memset to do it in one shot.
-
- // Subtract out the size of any elements we've already initialized.
- auto *RemainingSize = AllocSizeWithoutCookie;
- if (InitListElements) {
- // We know this can't overflow; we check this when doing the allocation.
- auto *InitializedSize = llvm::ConstantInt::get(
- RemainingSize->getType(),
- getContext().getTypeSizeInChars(ElementType).getQuantity() *
- InitListElements);
- RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
- }
-
- // Create the memset.
- Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
- return true;
- };
-
// If all elements have already been initialized, skip any further
// initialization.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
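Hoisting TryMemsetInitialization above makes it reachable from the braced-string special case added earlier in this function. The form that case handles, illustratively:

// new char[8]{"abc"};  // copies the 4 bytes "abc\0" into the array head,
//                      // then TryMemsetInitialization zeroes the other 4;
//                      // the single init-list entry fills a prefix of the
//                      // array, not one element, hence the dedicated path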
@@ -1110,23 +1235,24 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
- const FunctionDecl *Callee,
+ const FunctionDecl *CalleeDecl,
const FunctionProtoType *CalleeType,
const CallArgList &Args) {
llvm::Instruction *CallOrInvoke;
- llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
+ llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
RValue RV =
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
Args, CalleeType, /*chainCall=*/false),
- CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
+ Callee, ReturnValueSlot(), Args, &CallOrInvoke);
/// C++1y [expr.new]p10:
/// [In a new-expression,] an implementation is allowed to omit a call
/// to a replaceable global allocation function.
///
/// We model such elidable calls with the 'builtin' attribute.
- llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
- if (Callee->isReplaceableGlobalAllocationFunction() &&
+ llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
+ if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
// FIXME: Add addAttribute to CallSite.
if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
@@ -1159,111 +1285,116 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
llvm_unreachable("predeclared global operator new/delete is missing");
}
-namespace {
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression.
- class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
- const FunctionDecl *OperatorDelete;
- llvm::Value *Ptr;
- llvm::Value *AllocSize;
-
- RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+static std::pair<bool, bool>
+shouldPassSizeAndAlignToUsualDelete(const FunctionProtoType *FPT) {
+ auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
- public:
- static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(RValue);
- }
+ // The first argument is always a void*.
+ ++AI;
- CallDeleteDuringNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- llvm::Value *Ptr,
- llvm::Value *AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
-
- void setPlacementArg(unsigned I, RValue Arg) {
- assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
- }
-
- void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
- (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
+ // Figure out what other parameters we should be implicitly passing.
+ bool PassSize = false;
+ bool PassAlignment = false;
- CallArgList DeleteArgs;
-
- // The first argument is always a void*.
- FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
- DeleteArgs.add(RValue::get(Ptr), *AI++);
-
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumParams() == NumPlacementArgs + 2)
- DeleteArgs.add(RValue::get(AllocSize), *AI++);
+ if (AI != AE && (*AI)->isIntegerType()) {
+ PassSize = true;
+ ++AI;
+ }
- // Pass the rest of the arguments, which must match exactly.
- for (unsigned I = 0; I != NumPlacementArgs; ++I)
- DeleteArgs.add(getPlacementArgs()[I], *AI++);
+ if (AI != AE && (*AI)->isAlignValT()) {
+ PassAlignment = true;
+ ++AI;
+ }
- // Call 'operator delete'.
- EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
- }
- };
+ assert(AI == AE && "unexpected usual deallocation function parameter");
+ return {PassSize, PassAlignment};
+}
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression when the new expression is
- /// conditional.
- class CallDeleteDuringConditionalNew final : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon abnormal
+ /// exit from a new expression. Templated on a traits type that deals with
+ /// ensuring that the arguments dominate the cleanup if necessary.
+ template<typename Traits>
+ class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+ /// Type used to hold llvm::Value*s.
+ typedef typename Traits::ValueTy ValueTy;
+ /// Type used to hold RValues.
+ typedef typename Traits::RValueTy RValueTy;
+ struct PlacementArg {
+ RValueTy ArgValue;
+ QualType ArgType;
+ };
+
+ unsigned NumPlacementArgs : 31;
+ unsigned PassAlignmentToPlacementDelete : 1;
const FunctionDecl *OperatorDelete;
- DominatingValue<RValue>::saved_type Ptr;
- DominatingValue<RValue>::saved_type AllocSize;
+ ValueTy Ptr;
+ ValueTy AllocSize;
+ CharUnits AllocAlign;
- DominatingValue<RValue>::saved_type *getPlacementArgs() {
- return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ PlacementArg *getPlacementArgs() {
+ return reinterpret_cast<PlacementArg *>(this + 1);
}
public:
static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ return NumPlacementArgs * sizeof(PlacementArg);
}
- CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- DominatingValue<RValue>::saved_type Ptr,
- DominatingValue<RValue>::saved_type AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
-
- void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete, ValueTy Ptr,
+ ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
+ CharUnits AllocAlign)
+ : NumPlacementArgs(NumPlacementArgs),
+ PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
+ OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
+ AllocAlign(AllocAlign) {}
+
+ void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
+ getPlacementArgs()[I] = {Arg, Type};
}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
- (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
-
+ const FunctionProtoType *FPT =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
CallArgList DeleteArgs;
// The first argument is always a void*.
- FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
- DeleteArgs.add(Ptr.restore(CGF), *AI++);
-
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumParams() == NumPlacementArgs + 2) {
- RValue RV = AllocSize.restore(CGF);
- DeleteArgs.add(RV, *AI++);
+ DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
+
+ // Figure out what other parameters we should be implicitly passing.
+ bool PassSize = false;
+ bool PassAlignment = false;
+ if (NumPlacementArgs) {
+ // A placement deallocation function is implicitly passed an alignment
+ // if the placement allocation function was, but is never passed a size.
+ PassAlignment = PassAlignmentToPlacementDelete;
+ } else {
+ // For a non-placement new-expression, 'operator delete' can take a
+ // size and/or an alignment if it has the right parameters.
+ std::tie(PassSize, PassAlignment) =
+ shouldPassSizeAndAlignToUsualDelete(FPT);
}
+ // The second argument can be a std::size_t (for non-placement delete).
+ if (PassSize)
+ DeleteArgs.add(Traits::get(CGF, AllocSize),
+ CGF.getContext().getSizeType());
+
+ // The next (second or third) argument can be a std::align_val_t, which
+ // is an enum whose underlying type is std::size_t.
+ // FIXME: Use the right type as the parameter type. Note that in a call
+ // to operator delete(size_t, ...), we may not have it available.
+ if (PassAlignment)
+ DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
+ CGF.SizeTy, AllocAlign.getQuantity())),
+ CGF.getContext().getSizeType());
+
// Pass the rest of the arguments, which must match exactly.
for (unsigned I = 0; I != NumPlacementArgs; ++I) {
- RValue RV = getPlacementArgs()[I].restore(CGF);
- DeleteArgs.add(RV, *AI++);
+ auto Arg = getPlacementArgs()[I];
+ DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
}
// Call 'operator delete'.
@@ -1278,18 +1409,34 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
const CXXNewExpr *E,
Address NewPtr,
llvm::Value *AllocSize,
+ CharUnits AllocAlign,
const CallArgList &NewArgs) {
+ unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;
+
// If we're not inside a conditional branch, then the cleanup will
// dominate and we can do the easier (and more efficient) thing.
if (!CGF.isInConditionalBranch()) {
- CallDeleteDuringNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- NewPtr.getPointer(),
- AllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
+ struct DirectCleanupTraits {
+ typedef llvm::Value *ValueTy;
+ typedef RValue RValueTy;
+ static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
+ static RValue get(CodeGenFunction &, RValueTy V) { return V; }
+ };
+
+ typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
+
+ DirectCleanup *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr.getPointer(),
+ AllocSize,
+ E->passAlignment(),
+ AllocAlign);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+ auto &Arg = NewArgs[I + NumNonPlacementArgs];
+ Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
+ }
return;
}
@@ -1300,15 +1447,28 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
- CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- SavedNewPtr,
- SavedAllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I,
- DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
+ struct ConditionalCleanupTraits {
+ typedef DominatingValue<RValue>::saved_type ValueTy;
+ typedef DominatingValue<RValue>::saved_type RValueTy;
+ static RValue get(CodeGenFunction &CGF, ValueTy V) {
+ return V.restore(CGF);
+ }
+ };
+ typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
+
+ ConditionalCleanup *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize,
+ E->passAlignment(),
+ AllocAlign);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+ auto &Arg = NewArgs[I + NumNonPlacementArgs];
+ Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
+ Arg.Ty);
+ }
CGF.initFullExprCleanup();
}
@@ -1323,7 +1483,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// If there is a brace-initializer, we cannot allocate fewer elements than inits.
unsigned minElements = 0;
if (E->isArray() && E->hasInitializer()) {
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
+ if (ILE && ILE->isStringLiteralInit())
+ minElements =
+ cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
+ ->getSize().getZExtValue();
+ else if (ILE)
minElements = ILE->getNumInits();
}
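+  // For example, 'new char[n] {"hi"}' has a string-literal initializer of
+  // type char[3] (two characters plus the terminator), so at least three
+  // elements must be allocated.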
@@ -1332,6 +1497,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
+ CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
@@ -1347,10 +1513,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// The pointer expression will, in many cases, be an opaque void*.
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
- if (alignSource != AlignmentSource::Decl) {
- allocation = Address(allocation.getPointer(),
- getContext().getTypeAlignInChars(allocType));
- }
+ if (alignSource != AlignmentSource::Decl)
+ allocation = Address(allocation.getPointer(), allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
@@ -1363,28 +1527,55 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
} else {
const FunctionProtoType *allocatorType =
allocator->getType()->castAs<FunctionProtoType>();
+ unsigned ParamsToSkip = 0;
// The allocation size is the first argument.
QualType sizeType = getContext().getSizeType();
allocatorArgs.add(RValue::get(allocSize), sizeType);
+ ++ParamsToSkip;
+
+ if (allocSize != allocSizeWithoutCookie) {
+ CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
+ allocAlign = std::max(allocAlign, cookieAlign);
+ }
+
+ // The allocation alignment may be passed as the second argument.
+ if (E->passAlignment()) {
+ QualType AlignValT = sizeType;
+ if (allocatorType->getNumParams() > 1) {
+ AlignValT = allocatorType->getParamType(1);
+ assert(getContext().hasSameUnqualifiedType(
+ AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
+ sizeType) &&
+ "wrong type for alignment parameter");
+ ++ParamsToSkip;
+ } else {
+ // Corner case, passing alignment to 'operator new(size_t, ...)'.
+ assert(allocator->isVariadic() && "can't pass alignment to allocator");
+ }
+ allocatorArgs.add(
+ RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
+ AlignValT);
+ }
- // We start at 1 here because the first argument (the allocation size)
- // has already been emitted.
+ // FIXME: Why do we not pass a CalleeDecl here?
EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
- /* CalleeDecl */ nullptr,
- /*ParamsToSkip*/ 1);
+ /*CalleeDecl*/nullptr, /*ParamsToSkip*/ParamsToSkip);
RValue RV =
EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
- // For now, only assume that the allocation function returns
- // something satisfactorily aligned for the element type, plus
- // the cookie if we have one.
- CharUnits allocationAlign =
- getContext().getTypeAlignInChars(allocType);
- if (allocSize != allocSizeWithoutCookie) {
- CharUnits cookieAlign = getSizeAlign(); // FIXME?
- allocationAlign = std::max(allocationAlign, cookieAlign);
+ // If this was a call to a global replaceable allocation function that does
+ // not take an alignment argument, the allocator is known to produce
+ // storage that's suitably aligned for any object that fits, up to a known
+ // threshold. Otherwise assume it's suitably aligned for the allocated type.
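+  // For example, with a 16-byte target new alignment, an allocation of a
+  // 4-byte int is only known to be 4-byte aligned, while a 32-byte aggregate
+  // is known to be 16-byte aligned.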
+ CharUnits allocationAlign = allocAlign;
+ if (!E->passAlignment() &&
+ allocator->isReplaceableGlobalAllocationFunction()) {
+ unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
+ Target.getNewAlign(), getContext().getTypeSize(allocType)));
+ allocationAlign = std::max(
+ allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
}
allocation = Address(RV.getScalarVal(), allocationAlign);
@@ -1423,7 +1614,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Instruction *cleanupDominator = nullptr;
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
- EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
+ EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
+ allocatorArgs);
operatorDeleteCleanup = EHStack.stable_begin();
cleanupDominator = Builder.CreateUnreachable();
}
@@ -1485,31 +1677,58 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
}
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
- llvm::Value *Ptr,
- QualType DeleteTy) {
- assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+ llvm::Value *Ptr, QualType DeleteTy,
+ llvm::Value *NumElements,
+ CharUnits CookieSize) {
+ assert((!NumElements && CookieSize.isZero()) ||
+ DeleteFD->getOverloadedOperator() == OO_Array_Delete);
const FunctionProtoType *DeleteFTy =
DeleteFD->getType()->getAs<FunctionProtoType>();
CallArgList DeleteArgs;
- // Check if we need to pass the size to the delete operator.
- llvm::Value *Size = nullptr;
- QualType SizeTy;
- if (DeleteFTy->getNumParams() == 2) {
- SizeTy = DeleteFTy->getParamType(1);
- CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
- Size = llvm::ConstantInt::get(ConvertType(SizeTy),
- DeleteTypeSize.getQuantity());
- }
+ std::pair<bool, bool> PassSizeAndAlign =
+ shouldPassSizeAndAlignToUsualDelete(DeleteFTy);
+
+ auto ParamTypeIt = DeleteFTy->param_type_begin();
- QualType ArgTy = DeleteFTy->getParamType(0);
+ // Pass the pointer itself.
+ QualType ArgTy = *ParamTypeIt++;
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
- if (Size)
- DeleteArgs.add(RValue::get(Size), SizeTy);
+ // Pass the size if the delete function has a size_t parameter.
+ if (PassSizeAndAlign.first) {
+ QualType SizeType = *ParamTypeIt++;
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
+ DeleteTypeSize.getQuantity());
+
+ // For array new, multiply by the number of elements.
+ if (NumElements)
+ Size = Builder.CreateMul(Size, NumElements);
+
+ // If there is a cookie, add the cookie size.
+ if (!CookieSize.isZero())
+ Size = Builder.CreateAdd(
+ Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
+
+ DeleteArgs.add(RValue::get(Size), SizeType);
+ }
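+
+  // For example, 'delete[] p' on an array of ten 4-byte ints with an 8-byte
+  // array cookie passes 10 * 4 + 8 = 48 as the size argument above.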
+
+ // Pass the alignment if the delete function has an align_val_t parameter.
+ if (PassSizeAndAlign.second) {
+ QualType AlignValType = *ParamTypeIt++;
+ CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
+ getContext().getTypeAlignIfKnown(DeleteTy));
+ llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
+ DeleteTypeAlign.getQuantity());
+ DeleteArgs.add(RValue::get(Align), AlignValType);
+ }
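+
+  // For example, deleting an object whose type is declared alignas(32)
+  // passes 32 here when the selected 'operator delete' takes a
+  // std::align_val_t.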
+
+ assert(ParamTypeIt == DeleteFTy->param_type_end() &&
+ "unknown parameter to usual delete function");
// Emit the call to delete.
EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
@@ -1546,6 +1765,15 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
Address Ptr,
QualType ElementType) {
+ // C++11 [expr.delete]p3:
+ // If the static type of the object to be deleted is different from its
+ // dynamic type, the static type shall be a base class of the dynamic type
+ // of the object to be deleted and the static type shall have a virtual
+ // destructor or the behavior is undefined.
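+  //
+  // Under -fsanitize=vptr, for example, the check below flags deleting a
+  // derived-class object through a base-class pointer whose class has no
+  // virtual destructor.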
+ CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
+ DE->getExprLoc(), Ptr.getPointer(),
+ ElementType);
+
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
@@ -1613,45 +1841,8 @@ namespace {
ElementType(ElementType), CookieSize(CookieSize) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *DeleteFTy =
- OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
-
- CallArgList Args;
-
- // Pass the pointer as the first argument.
- QualType VoidPtrTy = DeleteFTy->getParamType(0);
- llvm::Value *DeletePtr
- = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
- Args.add(RValue::get(DeletePtr), VoidPtrTy);
-
- // Pass the original requested size as the second argument.
- if (DeleteFTy->getNumParams() == 2) {
- QualType size_t = DeleteFTy->getParamType(1);
- llvm::IntegerType *SizeTy
- = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
-
- CharUnits ElementTypeSize =
- CGF.CGM.getContext().getTypeSizeInChars(ElementType);
-
- // The size of an element, multiplied by the number of elements.
- llvm::Value *Size
- = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
- if (NumElements)
- Size = CGF.Builder.CreateMul(Size, NumElements);
-
- // Plus the size of the cookie if applicable.
- if (!CookieSize.isZero()) {
- llvm::Value *CookieSizeV
- = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
- Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
- }
-
- Args.add(RValue::get(Size), size_t);
- }
-
- // Emit the call to delete.
- EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
+ CookieSize);
}
};
}
@@ -1949,10 +2140,7 @@ void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
auto VAT = CurField->getCapturedVLAType();
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
} else {
- ArrayRef<VarDecl *> ArrayIndexes;
- if (CurField->getType()->isArrayType())
- ArrayIndexes = E->getCaptureInitIndexVars(i);
- EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ EmitInitializerForField(*CurField, LV, *i);
}
}
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 22910d931ded..59bc9cdbc056 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -13,12 +13,9 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
@@ -483,7 +480,9 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -600,10 +599,10 @@ ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
- llvm::Instruction *Call;
+ CGCallee Callee = CGCallee::forDirect(Func, FQTy->getAs<FunctionProtoType>());
- RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
- FQTy->getAs<FunctionProtoType>(), &Call);
+ llvm::Instruction *Call;
+ RValue Res = CGF.EmitCall(FuncInfo, Callee, ReturnValueSlot(), Args, &Call);
cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
return Res.getComplexVal();
}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 803b39907dd7..3db15c646f43 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -16,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
@@ -690,6 +691,9 @@ public:
case CK_ConstructorConversion:
return C;
+ case CK_IntToOCLSampler:
+ llvm_unreachable("global sampler variables are not generated");
+
case CK_Dependent: llvm_unreachable("saw dependent cast!");
case CK_BuiltinFnToFnPtr:
@@ -749,6 +753,7 @@ public:
case CK_FloatingToBoolean:
case CK_FloatingCast:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
return nullptr;
}
llvm_unreachable("Invalid CastKind");
@@ -775,9 +780,6 @@ public:
}
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
- if (ILE->isStringLiteralInit())
- return Visit(ILE->getInit(0));
-
llvm::ArrayType *AType =
cast<llvm::ArrayType>(ConvertType(ILE->getType()));
llvm::Type *ElemTy = AType->getElementType();
@@ -842,6 +844,9 @@ public:
}
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->isTransparent())
+ return Visit(ILE->getInit(0));
+
if (ILE->getType()->isArrayType())
return EmitArrayInitialization(ILE);
@@ -1018,16 +1023,17 @@ public:
switch (E->getStmtClass()) {
default: break;
case Expr::CompoundLiteralExprClass: {
- // Note that due to the nature of compound literals, this is guaranteed
- // to be the only use of the variable, so we just generate it here.
CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
+ if (llvm::GlobalVariable *Addr =
+ CGM.getAddrOfConstantCompoundLiteralIfEmitted(CLE))
+ return ConstantAddress(Addr, Align);
+
llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
CLE->getType(), CGF);
// FIXME: "Leaked" on failure.
if (!C) return ConstantAddress::invalid();
- CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
-
auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
@@ -1035,6 +1041,7 @@ public:
llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(E->getType()));
GV->setAlignment(Align.getQuantity());
+ CGM.setAddrOfConstantCompoundLiteral(CLE, GV);
return ConstantAddress(GV, Align);
}
case Expr::StringLiteralClass:
@@ -1083,7 +1090,7 @@ public:
return CGM.GetAddrOfConstantCFString(Literal);
}
case Expr::BlockExprClass: {
- std::string FunctionName;
+ StringRef FunctionName;
if (CGF)
FunctionName = CGF->CurFn->getName();
else
@@ -1091,7 +1098,7 @@ public:
// This is not really an l-value.
llvm::Constant *Ptr =
- CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName);
return ConstantAddress(Ptr, CGM.getPointerAlign());
}
case Expr::CXXTypeidExprClass: {
@@ -1259,6 +1266,10 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
return C;
}
+llvm::Constant *CodeGenModule::getNullPointer(llvm::PointerType *T, QualType QT) {
+ return getTargetCodeGenInfo().getNullPointer(*this, T, QT);
+}
+
llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
QualType DestType,
CodeGenFunction *CGF) {
@@ -1290,6 +1301,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
llvm::Constant *C = nullptr;
+
if (APValue::LValueBase LVBase = Value.getLValueBase()) {
// An array can be represented as an lvalue referring to the base.
if (isa<llvm::ArrayType>(DestTy)) {
@@ -1320,7 +1332,9 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
// Convert to the appropriate type; this could be an lvalue for
// an integer.
- if (isa<llvm::PointerType>(DestTy)) {
+ if (auto PT = dyn_cast<llvm::PointerType>(DestTy)) {
+ if (Value.isNullPointer())
+ return getNullPointer(PT, DestType);
// Convert the integer to a pointer-sized integer before converting it
// to a pointer.
C = llvm::ConstantExpr::getIntegerCast(
@@ -1354,7 +1368,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
}
case APValue::Float: {
const llvm::APFloat &Init = Value.getFloat();
- if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() &&
!Context.getLangOpts().NativeHalfType &&
!Context.getLangOpts().HalfArgsAndReturns)
return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
@@ -1480,6 +1494,18 @@ CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
return C;
}
+llvm::GlobalVariable *CodeGenModule::getAddrOfConstantCompoundLiteralIfEmitted(
+ const CompoundLiteralExpr *E) {
+ return EmittedCompoundLiterals.lookup(E);
+}
+
+void CodeGenModule::setAddrOfConstantCompoundLiteral(
+ const CompoundLiteralExpr *CLE, llvm::GlobalVariable *GV) {
+ bool Ok = EmittedCompoundLiterals.insert(std::make_pair(CLE, GV)).second;
+ (void)Ok;
+ assert(Ok && "CLE has already been emitted!");
+}
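+
+// Together, this lookup/insert pair lets repeated references to one
+// file-scope compound literal, e.g. '(int[]){1, 2, 3}' used twice, share a
+// single global variable instead of emitting a fresh copy at each use.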
+
ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
assert(E->isFileScope() && "not a file-scope compound literal expr");
@@ -1507,7 +1533,7 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
const CXXRecordDecl *base);
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
- const CXXRecordDecl *record,
+ const RecordDecl *record,
bool asCompleteObject) {
const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
llvm::StructType *structure =
@@ -1517,24 +1543,29 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
unsigned numElements = structure->getNumElements();
std::vector<llvm::Constant *> elements(numElements);
+ auto CXXR = dyn_cast<CXXRecordDecl>(record);
// Fill in all the bases.
- for (const auto &I : record->bases()) {
- if (I.isVirtual()) {
- // Ignore virtual bases; if we're laying out for a complete
- // object, we'll lay these out later.
- continue;
- }
+ if (CXXR) {
+ for (const auto &I : CXXR->bases()) {
+ if (I.isVirtual()) {
+ // Ignore virtual bases; if we're laying out for a complete
+ // object, we'll lay these out later.
+ continue;
+ }
- const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
- // Ignore empty bases.
- if (base->isEmpty())
- continue;
-
- unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
- llvm::Type *baseType = structure->getElementType(fieldIndex);
- elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ // Ignore empty bases.
+ if (base->isEmpty() ||
+ CGM.getContext().getASTRecordLayout(base).getNonVirtualSize()
+ .isZero())
+ continue;
+
+ unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
}
// Fill in all the fields.
@@ -1558,8 +1589,8 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
}
// Fill in the virtual bases, if we're working with the complete object.
- if (asCompleteObject) {
- for (const auto &I : record->vbases()) {
+ if (CXXR && asCompleteObject) {
+ for (const auto &I : CXXR->vbases()) {
const CXXRecordDecl *base =
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
@@ -1601,6 +1632,10 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
}
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ if (T->getAs<PointerType>())
+ return getNullPointer(
+ cast<llvm::PointerType>(getTypes().ConvertTypeForMem(T)), T);
+
if (getTypes().isZeroInitializable(T))
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
@@ -1616,10 +1651,8 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
return llvm::ConstantArray::get(ATy, Array);
}
- if (const RecordType *RT = T->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- return ::EmitNullConstant(*this, RD, /*complete object*/ true);
- }
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return ::EmitNullConstant(*this, RT->getDecl(), /*complete object*/ true);
assert(T->isMemberDataPointerType() &&
"Should only see pointers to data members here!");
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 120dacfbb011..1b85c45cd4be 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -19,6 +19,7 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
@@ -171,9 +172,9 @@ public:
}
/// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
- Value *EmitPointerToBoolConversion(Value *V) {
- Value *Zero = llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(V->getType()));
+ Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
+    Value *Zero =
+        CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
+
return Builder.CreateICmpNE(V, Zero, "tobool");
}
@@ -310,6 +311,12 @@ public:
Value *VisitInitListExpr(InitListExpr *E);
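+
+  // ArrayInitLoopExpr/ArrayInitIndexExpr arise, for example, when a lambda
+  // copies a captured array element by element; the index expression below
+  // yields the loop counter of the element currently being initialized.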
+ Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ assert(CGF.getArrayInitIndex() &&
+ "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
+ return CGF.getArrayInitIndex();
+ }
+
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return EmitNullValue(E->getType());
}
@@ -591,7 +598,7 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return EmitIntToBoolConversion(Src);
assert(isa<llvm::PointerType>(Src->getType()));
- return EmitPointerToBoolConversion(Src);
+ return EmitPointerToBoolConversion(Src, SrcType);
}
void ScalarExprEmitter::EmitFloatConversionCheck(
@@ -724,7 +731,7 @@ void ScalarExprEmitter::EmitFloatConversionCheck(
CGF.EmitCheckTypeDescriptor(OrigSrcType),
CGF.EmitCheckTypeDescriptor(DstType)};
CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
- "float_cast_overflow", StaticArgs, OrigSrc);
+ SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
/// Emit a conversion from the specified type to the specified destination type,
@@ -787,7 +794,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Handle pointer conversions next: pointers can only be converted to/from
// other pointers and integers. Check for pointer types in terms of LLVM, as
// some native types (like Obj-C id) may map to a pointer type.
- if (isa<llvm::PointerType>(DstTy)) {
+ if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
// The source value may be an integer, or a pointer.
if (isa<llvm::PointerType>(SrcTy))
return Builder.CreateBitCast(Src, DstTy, "conv");
@@ -795,7 +802,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
- llvm::Type *MiddleTy = CGF.IntPtrTy;
+ llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -927,7 +934,7 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
void ScalarExprEmitter::EmitBinOpCheck(
ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
assert(CGF.IsSanitizerScope);
- StringRef CheckName;
+ SanitizerHandler Check;
SmallVector<llvm::Constant *, 4> StaticData;
SmallVector<llvm::Value *, 2> DynamicData;
@@ -938,13 +945,13 @@ void ScalarExprEmitter::EmitBinOpCheck(
StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
if (UO && UO->getOpcode() == UO_Minus) {
- CheckName = "negate_overflow";
+ Check = SanitizerHandler::NegateOverflow;
StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
DynamicData.push_back(Info.RHS);
} else {
if (BinaryOperator::isShiftOp(Opcode)) {
// Shift LHS negative or too large, or RHS out of bounds.
- CheckName = "shift_out_of_bounds";
+ Check = SanitizerHandler::ShiftOutOfBounds;
const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
StaticData.push_back(
CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
@@ -952,14 +959,14 @@ void ScalarExprEmitter::EmitBinOpCheck(
CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
} else if (Opcode == BO_Div || Opcode == BO_Rem) {
// Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
- CheckName = "divrem_overflow";
+ Check = SanitizerHandler::DivremOverflow;
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
} else {
// Arithmetic overflow (+, -, *).
switch (Opcode) {
- case BO_Add: CheckName = "add_overflow"; break;
- case BO_Sub: CheckName = "sub_overflow"; break;
- case BO_Mul: CheckName = "mul_overflow"; break;
+ case BO_Add: Check = SanitizerHandler::AddOverflow; break;
+ case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
+ case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
default: llvm_unreachable("unexpected opcode for bin op check");
}
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
@@ -968,7 +975,7 @@ void ScalarExprEmitter::EmitBinOpCheck(
DynamicData.push_back(Info.RHS);
}
- CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
+ CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
//===----------------------------------------------------------------------===//
@@ -1394,11 +1401,23 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Builder.CreateBitCast(Src, DstTy);
}
case CK_AddressSpaceConversion: {
- Value *Src = Visit(const_cast<Expr*>(E));
+ Expr::EvalResult Result;
+ if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
+ Result.Val.isNullPointer()) {
+      // If E has side effects, it is emitted even though its final result is
+      // a null pointer. In that case, a DCE pass should be able to eliminate
+      // the useless instructions emitted while translating E.
+ if (Result.HasSideEffects)
+ Visit(E);
+ return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
+ ConvertType(DestTy)), DestTy);
+ }
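+    // This matters on targets where some address spaces use a non-zero bit
+    // pattern for null: the destination's real null value is produced
+    // directly instead of addrspacecast'ing a plain zero.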
// Since the target may map different address spaces in the AST to the same
// address space, an address space conversion may end up as a bitcast.
- return Builder.CreatePointerBitCastOrAddrSpaceCast(Src,
- ConvertType(DestTy));
+ auto *Src = Visit(E);
+ return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(CGF, Src,
+ E->getType(),
+ DestTy);
}
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
@@ -1453,8 +1472,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
if (MustVisitNullValue(E))
(void) Visit(E);
- return llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(ConvertType(DestTy)));
+ return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
+ DestTy);
case CK_NullToMemberPointer: {
if (MustVisitNullValue(E))
@@ -1510,12 +1529,13 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// First, convert to the correct width so that we control the kind of
// extension.
- llvm::Type *MiddleTy = CGF.IntPtrTy;
+ auto DestLLVMTy = ConvertType(DestTy);
+ llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
- return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+ return Builder.CreateIntToPtr(IntResult, DestLLVMTy);
}
case CK_PointerToIntegral:
assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
@@ -1546,7 +1566,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_IntegralToBoolean:
return EmitIntToBoolConversion(Visit(E));
case CK_PointerToBoolean:
- return EmitPointerToBoolConversion(Visit(E));
+ return EmitPointerToBoolConversion(Visit(E), E->getType());
case CK_FloatingToBoolean:
return EmitFloatToBoolConversion(Visit(E));
case CK_MemberPointerToBoolean: {
@@ -1573,8 +1593,16 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return llvm::Constant::getNullValue(ConvertType(DestTy));
}
+ case CK_ZeroToOCLQueue: {
+ assert(DestTy->isQueueT() && "CK_ZeroToOCLQueue cast on non queue_t type");
+ return llvm::Constant::getNullValue(ConvertType(DestTy));
}
+ case CK_IntToOCLSampler:
+ return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
+
+ } // end of switch
+
llvm_unreachable("unknown scalar cast");
}
@@ -2273,8 +2301,13 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if (CGF.getLangOpts().OpenCL) {
- // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
+ if (CGF.getLangOpts().OpenCL &&
+ !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
+ // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
+ // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
+ // build option allows an application to specify that single precision
+ // floating-point divide (x/y and 1/x) and sqrt used in the program
+ // source are correctly rounded.
llvm::Type *ValTy = Val->getType();
if (ValTy->isFloatTy() ||
(isa<llvm::VectorType>(ValTy) &&
@@ -2363,9 +2396,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
- llvm::Function::iterator insertPt = initialBB->getIterator();
- llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
- &*std::next(insertPt));
+ llvm::BasicBlock *continueBB =
+ CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
@@ -2429,11 +2461,13 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
}
unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
- if (width != CGF.PointerWidthInBits) {
+ auto &DL = CGF.CGM.getDataLayout();
+ auto PtrTy = cast<llvm::PointerType>(pointer->getType());
+ if (width != DL.getTypeSizeInBits(PtrTy)) {
// Zero-extend or sign-extend the index value to pointer width, according
// to whether the index is signed or not.
bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
- index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
"idx.ext");
}
@@ -3397,6 +3431,52 @@ static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
return Builder.CreateShuffleVector(Src, UnV, Mask);
}
+// Create cast instructions for converting LLVM value \p Src to LLVM type \p
+// DstTy. \p Src has the same size as \p DstTy. Both are single value types
+// but could be scalar or vectors of different lengths, and either can be
+// pointer.
+// There are 4 cases:
+// 1. non-pointer -> non-pointer : needs 1 bitcast
+// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
+// 3. pointer -> non-pointer
+// a) pointer -> intptr_t : needs 1 ptrtoint
+// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
+// 4. non-pointer -> pointer
+// a) intptr_t -> pointer : needs 1 inttoptr
+// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
+// Note: for cases 3b and 4b two casts are required since LLVM casts do not
+// allow casting directly between pointer types and non-integer non-pointer
+// types.
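+// For example (case 3b), casting an i8* to a double emits a ptrtoint to the
+// pointer-sized integer type, then a bitcast from that integer to double.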
+static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
+ const llvm::DataLayout &DL,
+ Value *Src, llvm::Type *DstTy,
+ StringRef Name = "") {
+ auto SrcTy = Src->getType();
+
+ // Case 1.
+ if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
+ return Builder.CreateBitCast(Src, DstTy, Name);
+
+ // Case 2.
+ if (SrcTy->isPointerTy() && DstTy->isPointerTy())
+ return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
+
+ // Case 3.
+ if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
+ // Case 3b.
+ if (!DstTy->isIntegerTy())
+ Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
+ // Cases 3a and 3b.
+ return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
+ }
+
+ // Case 4b.
+ if (!SrcTy->isIntegerTy())
+ Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
+ // Cases 4a and 4b.
+ return Builder.CreateIntToPtr(Src, DstTy, Name);
+}
+
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
llvm::Type *DstTy = ConvertType(E->getType());
@@ -3411,7 +3491,8 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// vector to get a vec4, then a bitcast if the target type is different.
if (NumElementsSrc == 3 && NumElementsDst != 3) {
Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
- Src = Builder.CreateBitCast(Src, DstTy);
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ DstTy);
Src->setName("astype");
return Src;
}
@@ -3421,13 +3502,15 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// get a vec3.
if (NumElementsSrc != 3 && NumElementsDst == 3) {
auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
- Src = Builder.CreateBitCast(Src, Vec4Ty);
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ Vec4Ty);
Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
Src->setName("astype");
return Src;
}
- return Builder.CreateBitCast(Src, DstTy, "astype");
+  return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+                                      DstTy, "astype");
}
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index 51474f16a018..28998ce8db44 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -20,14 +20,15 @@ using namespace clang::CodeGen;
using namespace llvm;
static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
- llvm::DebugLoc Location) {
+ const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc) {
if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
Attrs.UnrollEnable == LoopAttributes::Unspecified &&
Attrs.DistributeEnable == LoopAttributes::Unspecified &&
- !Location)
+ !StartLoc && !EndLoc)
return nullptr;
SmallVector<Metadata *, 4> Args;
@@ -35,9 +36,14 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
auto TempNode = MDNode::getTemporary(Ctx, None);
Args.push_back(TempNode.get());
- // If we have a valid debug location for the loop, add it.
- if (Location)
- Args.push_back(Location.getAsMDNode());
+ // If we have a valid start debug location for the loop, add it.
+ if (StartLoc) {
+ Args.push_back(StartLoc.getAsMDNode());
+
+ // If we also have a valid end debug location for the loop, add it.
+ if (EndLoc)
+ Args.push_back(EndLoc.getAsMDNode());
+ }
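+
+  // The resulting loop ID is a self-referential metadata node, for example:
+  //   !0 = distinct !{!0, !DILocation(line: 3, ...), !DILocation(line: 8, ...)}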
// Setting vectorize.width
if (Attrs.VectorizeWidth > 0) {
@@ -112,23 +118,26 @@ void LoopAttributes::clear() {
UnrollCount = 0;
VectorizeEnable = LoopAttributes::Unspecified;
UnrollEnable = LoopAttributes::Unspecified;
+ DistributeEnable = LoopAttributes::Unspecified;
}
LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
- llvm::DebugLoc Location)
+ const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
: LoopID(nullptr), Header(Header), Attrs(Attrs) {
- LoopID = createMetadata(Header->getContext(), Attrs, Location);
+ LoopID = createMetadata(Header->getContext(), Attrs, StartLoc, EndLoc);
}
-void LoopInfoStack::push(BasicBlock *Header, llvm::DebugLoc Location) {
- Active.push_back(LoopInfo(Header, StagedAttrs, Location));
+void LoopInfoStack::push(BasicBlock *Header, const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc) {
+ Active.push_back(LoopInfo(Header, StagedAttrs, StartLoc, EndLoc));
// Clear the attributes so nested loops do not inherit them.
StagedAttrs.clear();
}
void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
ArrayRef<const clang::Attr *> Attrs,
- llvm::DebugLoc Location) {
+ const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc) {
// Identify loop hint attributes from Attrs.
for (const auto *Attr : Attrs) {
@@ -266,7 +275,7 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
}
/// Stage the attributes.
- push(Header, Location);
+ push(Header, StartLoc, EndLoc);
}
void LoopInfoStack::pop() {
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index a0111edde5de..15608c105dc7 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -16,7 +16,6 @@
#define LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Value.h"
@@ -68,7 +67,7 @@ class LoopInfo {
public:
/// \brief Construct a new LoopInfo for the loop with entry Header.
LoopInfo(llvm::BasicBlock *Header, const LoopAttributes &Attrs,
- llvm::DebugLoc Location);
+ const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc);
/// \brief Get the loop id metadata for this loop.
llvm::MDNode *getLoopID() const { return LoopID; }
@@ -100,14 +99,14 @@ public:
/// \brief Begin a new structured loop. The set of staged attributes will be
/// applied to the loop and then cleared.
- void push(llvm::BasicBlock *Header,
- llvm::DebugLoc Location = llvm::DebugLoc());
+ void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc);
/// \brief Begin a new structured loop. Stage attributes from the Attrs list.
/// The staged attributes are applied to the loop and then cleared.
void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
- llvm::ArrayRef<const Attr *> Attrs,
- llvm::DebugLoc Location = llvm::DebugLoc());
+ llvm::ArrayRef<const Attr *> Attrs, const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc);
/// \brief End the current loop.
void pop();
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index db894ce67470..932b8a129e6e 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -589,9 +589,10 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
- llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
- fn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
/// Determine whether the given architecture supports unaligned atomic
@@ -852,11 +853,12 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Value *copyCppAtomicObjectFn =
+ llvm::Constant *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
+ CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
- copyCppAtomicObjectFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
void
@@ -927,12 +929,13 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
case PropertyImplStrategy::GetSetProperty: {
- llvm::Value *getPropertyFn =
+ llvm::Constant *getPropertyFn =
CGM.getObjCRuntime().GetPropertyGetFunction();
if (!getPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
return;
}
+ CGCallee callee = CGCallee::forDirect(getPropertyFn);
// Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
// FIXME: Can't this be simpler? This might even be worse than the
@@ -955,8 +958,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
llvm::Instruction *CallInstruction;
RValue RV = EmitCall(
getTypes().arrangeBuiltinFunctionCall(propType, args),
- getPropertyFn, ReturnValueSlot(), args, CGCalleeInfo(),
- &CallInstruction);
+ callee, ReturnValueSlot(), args, &CallInstruction);
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
call->setTailCall();
@@ -1068,10 +1070,11 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// FIXME: should this really always be false?
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
- llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
- copyStructFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
@@ -1103,11 +1106,12 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Value *copyCppAtomicObjectFn =
+ llvm::Constant *fn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
+ CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
- copyCppAtomicObjectFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
@@ -1197,8 +1201,8 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
case PropertyImplStrategy::GetSetProperty:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
- llvm::Value *setOptimizedPropertyFn = nullptr;
- llvm::Value *setPropertyFn = nullptr;
+ llvm::Constant *setOptimizedPropertyFn = nullptr;
+ llvm::Constant *setPropertyFn = nullptr;
if (UseOptimizedSetter(CGM)) {
// 10.8 and iOS 6.0 code and GC is off
setOptimizedPropertyFn =
@@ -1236,8 +1240,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
if (setOptimizedPropertyFn) {
args.add(RValue::get(arg), getContext().getObjCIdType());
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
- setOptimizedPropertyFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
} else {
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
args.add(RValue::get(arg), getContext().getObjCIdType());
@@ -1247,8 +1252,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
+ CGCallee callee = CGCallee::forDirect(setPropertyFn);
EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
- setPropertyFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
return;
@@ -1450,13 +1456,14 @@ QualType CodeGenFunction::TypeOfSelfObject() {
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
- llvm::Constant *EnumerationMutationFn =
+ llvm::Constant *EnumerationMutationFnPtr =
CGM.getObjCRuntime().EnumerationMutationFunction();
-
- if (!EnumerationMutationFn) {
+ if (!EnumerationMutationFnPtr) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
}
+ CGCallee EnumerationMutationFn =
+ CGCallee::forDirect(EnumerationMutationFnPtr);
CGDebugInfo *DI = getDebugInfo();
if (DI)
@@ -1662,7 +1669,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
} else {
- EmitScalarInit(CurrentItem, elementLValue);
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
+ /*isInit*/ true);
}
// If we do have an element variable, this assignment is the end of
@@ -1803,7 +1811,8 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
// If the target runtime doesn't naturally support ARC, emit weak
// references to the runtime support library. We don't really
// permit this to fail, but we need a particular relocation style.
- if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
+ if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
+ !CGM.getTriple().isOSBinFormatCOFF()) {
f->setLinkage(llvm::Function::ExternalWeakLinkage);
} else if (fnName == "objc_retain" || fnName == "objc_release") {
// If we have Native ARC, set nonlazybind attribute for these APIs for
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index caafef84c333..fa2b3d81e29a 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -18,6 +18,7 @@
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -163,9 +164,8 @@ protected:
/// Helper function that generates a constant string and returns a pointer to
/// the start of the string. The result of this function can be used anywhere
/// where the C code specifies const char*.
- llvm::Constant *MakeConstantString(const std::string &Str,
- const std::string &Name="") {
- ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ llvm::Constant *MakeConstantString(StringRef Str, const char *Name = "") {
+ ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name);
return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
Array.getPointer(), Zeros);
}
@@ -174,14 +174,14 @@ protected:
/// string value. This allows the linker to combine the strings between
/// different modules. Used for EH typeinfo names, selector strings, and a
/// few other things.
- llvm::Constant *ExportUniqueString(const std::string &Str,
- const std::string prefix) {
- std::string name = prefix + Str;
- auto *ConstStr = TheModule.getGlobalVariable(name);
+ llvm::Constant *ExportUniqueString(const std::string &Str, StringRef Prefix) {
+ std::string Name = Prefix.str() + Str;
+ auto *ConstStr = TheModule.getGlobalVariable(Name);
if (!ConstStr) {
llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
- llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ llvm::GlobalValue::LinkOnceODRLinkage,
+ value, Name);
}
return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
ConstStr, Zeros);
@@ -190,47 +190,17 @@ protected:
-  /// Generates a global structure, initialized by the elements in the vector.
-  /// The element types must match the types of the structure elements in the
-  /// first argument.
+  /// Generates a global variable initialized with the given constant; the
+  /// global's value type is taken from the constant itself.
- llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
- ArrayRef<llvm::Constant *> V,
+ llvm::GlobalVariable *MakeGlobal(llvm::Constant *C,
CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
- llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
- auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ auto GV = new llvm::GlobalVariable(TheModule, C->getType(), false,
linkage, C, Name);
GV->setAlignment(Align.getQuantity());
return GV;
}
- /// Generates a global array. The vector must contain the same number of
- /// elements that the array type declares, of the type specified as the array
- /// element type.
- llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
- ArrayRef<llvm::Constant *> V,
- CharUnits Align,
- StringRef Name="",
- llvm::GlobalValue::LinkageTypes linkage
- =llvm::GlobalValue::InternalLinkage) {
- llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
- auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
- GV->setAlignment(Align.getQuantity());
- return GV;
- }
-
- /// Generates a global array, inferring the array type from the specified
- /// element type and the size of the initialiser.
- llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
- ArrayRef<llvm::Constant *> V,
- CharUnits Align,
- StringRef Name="",
- llvm::GlobalValue::LinkageTypes linkage
- =llvm::GlobalValue::InternalLinkage) {
- llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
- return MakeGlobal(ArrayTy, V, Align, Name, linkage);
- }
-
/// Returns a property name and encoding string.
llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
const Decl *Container) {
@@ -238,8 +208,8 @@ protected:
if ((R.getKind() == ObjCRuntime::GNUstep) &&
(R.getVersion() >= VersionTuple(1, 6))) {
std::string NameAndAttributes;
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ std::string TypeStr =
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container);
NameAndAttributes += '\0';
NameAndAttributes += TypeStr.length() + 3;
NameAndAttributes += TypeStr;
@@ -251,7 +221,7 @@ protected:
}
/// Push the property attributes into two structure fields.
- void PushPropertyAttributes(std::vector<llvm::Constant*> &Fields,
+ void PushPropertyAttributes(ConstantStructBuilder &Fields,
ObjCPropertyDecl *property, bool isSynthesized=true, bool
isDynamic=true) {
int attrs = property->getPropertyAttributes();
@@ -263,7 +233,7 @@ protected:
attrs &= ~ObjCPropertyDecl::OBJC_PR_strong;
}
// The first flags field has the same attribute values as clang uses internally
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff));
+ Fields.addInt(Int8Ty, attrs & 0xff);
attrs >>= 8;
attrs <<= 2;
// For protocol properties, synthesized and dynamic have no meaning, so we
@@ -273,10 +243,10 @@ protected:
attrs |= isDynamic ? (1<<1) : 0;
// The second field is the next four fields left shifted by two, with the
// low bit set to indicate whether the field is synthesized or dynamic.
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff));
+ Fields.addInt(Int8Ty, attrs & 0xff);
// Two padding fields
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ Fields.addInt(Int8Ty, 0);
+ Fields.addInt(Int8Ty, 0);
}
/// Ensures that the value has the required type, by inserting a bitcast if
@@ -590,11 +560,6 @@ public:
llvm::Constant *BuildByrefLayout(CodeGenModule &CGM, QualType T) override {
return NULLPtr;
}
-
- llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) override {
- return nullptr;
- }
};
/// Class representing the legacy GCC Objective-C ABI. This is the default when
@@ -1152,8 +1117,7 @@ llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel) {
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
- std::string SelTypes;
- CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ std::string SelTypes = CGM.getContext().getObjCEncodingForMethodDecl(Method);
return GetSelector(CGF, Method->getSelector(), SelTypes);
}
@@ -1233,14 +1197,15 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
llvm::Constant *typeName =
ExportUniqueString(className, "__objc_eh_typename_");
- std::vector<llvm::Constant*> fields;
- fields.push_back(BVtable);
- fields.push_back(typeName);
- llvm::Constant *TI =
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr),
- fields, CGM.getPointerAlign(),
- "__objc_eh_typeinfo_" + className,
- llvm::GlobalValue::LinkOnceODRLinkage);
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct();
+ fields.add(BVtable);
+ fields.add(typeName);
+ llvm::Constant *TI =
+ fields.finishAndCreateGlobal("__objc_eh_typeinfo_" + className,
+ CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::LinkOnceODRLinkage);
return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
}
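
This is the first of many hunks replacing the MakeGlobal helpers with
ConstantInitBuilder. A minimal sketch of the idiom, using the API exactly as
it appears in this diff (NULLPtr and Int32Ty are the file's own helpers; the
field values are placeholders):

  ConstantInitBuilder builder(CGM);
  auto fields = builder.beginStruct();        // anonymous struct type
  fields.add(NULLPtr);                        // a pointer field (placeholder)
  fields.addInt(Int32Ty, 42);                 // an integer field (placeholder)
  llvm::GlobalVariable *GV =
      fields.finishAndCreateGlobal("__example", CGM.getPointerAlign(),
                                   /*constant*/ false,
                                   llvm::GlobalValue::InternalLinkage);

The builder infers the aggregate type from the fields added, which is why the
explicit nullptr-terminated llvm::StructType::get calls disappear below.
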
@@ -1270,13 +1235,13 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
else if (isa->getType() != PtrToIdTy)
isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
- std::vector<llvm::Constant*> Ivars;
- Ivars.push_back(isa);
- Ivars.push_back(MakeConstantString(Str));
- Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
- llvm::Constant *ObjCStr = MakeGlobal(
- llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, nullptr),
- Ivars, Align, ".objc_str");
+ ConstantInitBuilder Builder(CGM);
+ auto Fields = Builder.beginStruct();
+ Fields.add(isa);
+ Fields.add(MakeConstantString(Str));
+ Fields.addInt(IntTy, Str.size());
+ llvm::Constant *ObjCStr =
+ Fields.finishAndCreateGlobal(".objc_str", Align);
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
@@ -1386,9 +1351,10 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::Type::getInt1Ty(VMContext), IsClassMessage))};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+ CGCallee callee(CGCalleeInfo(), imp);
+
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
- CGCalleeInfo(), &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call);
call->setMetadata(msgSendMDKind, node);
return msgRet;
}
@@ -1500,8 +1466,8 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
- CGCalleeInfo(), &call);
+ CGCallee callee(CGCalleeInfo(), imp);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call);
call->setMetadata(msgSendMDKind, node);
@@ -1550,50 +1516,38 @@ GenerateMethodList(StringRef ClassName,
bool isClassMethodList) {
if (MethodSels.empty())
return NULLPtr;
+
+ ConstantInitBuilder Builder(CGM);
+
+ auto MethodList = Builder.beginStruct();
+ MethodList.addNullPointer(CGM.Int8PtrTy);
+ MethodList.addInt(Int32Ty, MethodTypes.size());
+
// Get the method structure type.
- llvm::StructType *ObjCMethodTy = llvm::StructType::get(
- PtrToInt8Ty, // Really a selector, but the runtime creates it us.
- PtrToInt8Ty, // Method types
- IMPTy, //Method pointer
- nullptr);
- std::vector<llvm::Constant*> Methods;
+ llvm::StructType *ObjCMethodTy =
+ llvm::StructType::get(CGM.getLLVMContext(), {
+ PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+ PtrToInt8Ty, // Method types
+ IMPTy // Method pointer
+ });
+ auto Methods = MethodList.beginArray();
for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
- llvm::Constant *Method =
+ llvm::Constant *FnPtr =
TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
MethodSels[i],
isClassMethodList));
- assert(Method && "Can't generate metadata for method that doesn't exist");
- llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
- Method = llvm::ConstantExpr::getBitCast(Method,
- IMPTy);
- Methods.push_back(
- llvm::ConstantStruct::get(ObjCMethodTy, {C, MethodTypes[i], Method}));
- }
-
- // Array of method structures
- llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
- Methods.size());
- llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
- Methods);
-
- // Structure containing list pointer, array and array count
- llvm::StructType *ObjCMethodListTy = llvm::StructType::create(VMContext);
- llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
- ObjCMethodListTy->setBody(
- NextPtrTy,
- IntTy,
- ObjCMethodArrayTy,
- nullptr);
-
- Methods.clear();
- Methods.push_back(llvm::ConstantPointerNull::get(
- llvm::PointerType::getUnqual(ObjCMethodListTy)));
- Methods.push_back(llvm::ConstantInt::get(Int32Ty, MethodTypes.size()));
- Methods.push_back(MethodArray);
+ assert(FnPtr && "Can't generate metadata for method that doesn't exist");
+ auto Method = Methods.beginStruct(ObjCMethodTy);
+ Method.add(MakeConstantString(MethodSels[i].getAsString()));
+ Method.add(MethodTypes[i]);
+ Method.addBitCast(FnPtr, IMPTy);
+ Method.finishAndAddTo(Methods);
+ }
+ Methods.finishAndAddTo(MethodList);
// Create an instance of the structure
- return MakeGlobal(ObjCMethodListTy, Methods, CGM.getPointerAlign(),
- ".objc_method_list");
+ return MethodList.finishAndCreateGlobal(".objc_method_list",
+ CGM.getPointerAlign());
}
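
GenerateMethodList shows the nested form of the same idiom: arrays and structs
nest, and each child builder must be sealed with finishAndAddTo before its
parent is finished. A reduced sketch (ElementTy and the field values are
placeholders):

  ConstantInitBuilder Builder(CGM);
  auto List = Builder.beginStruct();
  List.addInt(Int32Ty, 2);                      // element count (placeholder)
  llvm::StructType *ElementTy =
      llvm::StructType::get(CGM.getLLVMContext(), { PtrToInt8Ty, PtrToInt8Ty });
  auto Array = List.beginArray(ElementTy);
  for (unsigned i = 0; i < 2; ++i) {
    auto Elt = Array.beginStruct(ElementTy);
    Elt.add(NULLPtr);                           // placeholder fields
    Elt.add(NULLPtr);
    Elt.finishAndAddTo(Array);                  // seal each element in turn
  }
  Array.finishAndAddTo(List);                   // seal the array, then the struct
  llvm::Constant *GV =
      List.finishAndCreateGlobal(".example_list", CGM.getPointerAlign());
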
/// Generates an IvarList. Used in construction of a objc_class.
@@ -1601,35 +1555,36 @@ llvm::Constant *CGObjCGNU::
GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
ArrayRef<llvm::Constant *> IvarTypes,
ArrayRef<llvm::Constant *> IvarOffsets) {
- if (IvarNames.size() == 0)
+ if (IvarNames.empty())
return NULLPtr;
- // Get the method structure type.
+
+ ConstantInitBuilder Builder(CGM);
+
+ // Structure containing array count followed by array.
+ auto IvarList = Builder.beginStruct();
+ IvarList.addInt(IntTy, (int)IvarNames.size());
+
+ // Get the ivar structure type.
llvm::StructType *ObjCIvarTy = llvm::StructType::get(
PtrToInt8Ty,
PtrToInt8Ty,
IntTy,
nullptr);
- std::vector<llvm::Constant*> Ivars;
+
+ // Array of ivar structures.
+ auto Ivars = IvarList.beginArray(ObjCIvarTy);
for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
- Ivars.push_back(llvm::ConstantStruct::get(
- ObjCIvarTy, {IvarNames[i], IvarTypes[i], IvarOffsets[i]}));
+ auto Ivar = Ivars.beginStruct(ObjCIvarTy);
+ Ivar.add(IvarNames[i]);
+ Ivar.add(IvarTypes[i]);
+ Ivar.add(IvarOffsets[i]);
+ Ivar.finishAndAddTo(Ivars);
}
-
- // Array of method structures
- llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
- IvarNames.size());
-
- llvm::Constant *Elements[] = {
- llvm::ConstantInt::get(IntTy, (int)IvarNames.size()),
- llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars)};
- // Structure containing array and array count
- llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
- ObjCIvarArrayTy,
- nullptr);
+ Ivars.finishAndAddTo(IvarList);
// Create an instance of the structure
- return MakeGlobal(ObjCIvarListTy, Elements, CGM.getPointerAlign(),
- ".objc_ivar_list");
+ return IvarList.finishAndCreateGlobal(".objc_ivar_list",
+ CGM.getPointerAlign());
}
/// Generate a class structure
@@ -1677,34 +1632,55 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
IntPtrTy, // strong_pointers
IntPtrTy, // weak_pointers
nullptr);
- llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct(ClassTy);
+
// Fill in the structure
- std::vector<llvm::Constant*> Elements;
- Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
- Elements.push_back(SuperClass);
- Elements.push_back(MakeConstantString(Name, ".class_name"));
- Elements.push_back(Zero);
- Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+
+ // isa
+ Elements.addBitCast(MetaClass, PtrToInt8Ty);
+ // super_class
+ Elements.add(SuperClass);
+ // name
+ Elements.add(MakeConstantString(Name, ".class_name"));
+ // version
+ Elements.addInt(LongTy, 0);
+ // info
+ Elements.addInt(LongTy, info);
+ // instance_size
if (isMeta) {
llvm::DataLayout td(&TheModule);
- Elements.push_back(
- llvm::ConstantInt::get(LongTy,
- td.getTypeSizeInBits(ClassTy) /
- CGM.getContext().getCharWidth()));
+ Elements.addInt(LongTy,
+ td.getTypeSizeInBits(ClassTy) /
+ CGM.getContext().getCharWidth());
} else
- Elements.push_back(InstanceSize);
- Elements.push_back(IVars);
- Elements.push_back(Methods);
- Elements.push_back(NULLPtr);
- Elements.push_back(NULLPtr);
- Elements.push_back(NULLPtr);
- Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
- Elements.push_back(NULLPtr);
- Elements.push_back(llvm::ConstantInt::get(LongTy, 1));
- Elements.push_back(IvarOffsets);
- Elements.push_back(Properties);
- Elements.push_back(StrongIvarBitmap);
- Elements.push_back(WeakIvarBitmap);
+ Elements.add(InstanceSize);
+ // ivars
+ Elements.add(IVars);
+ // methods
+ Elements.add(Methods);
+ // These are all filled in by the runtime, so we just emit NULL placeholders:
+ // dtable
+ Elements.add(NULLPtr);
+ // subclass_list
+ Elements.add(NULLPtr);
+ // sibling_class
+ Elements.add(NULLPtr);
+ // protocols
+ Elements.addBitCast(Protocols, PtrTy);
+ // gc_object_type
+ Elements.add(NULLPtr);
+ // abi_version
+ Elements.addInt(LongTy, 1);
+ // ivar_offsets
+ Elements.add(IvarOffsets);
+ // properties
+ Elements.add(Properties);
+ // strong_pointers
+ Elements.add(StrongIvarBitmap);
+ // weak_pointers
+ Elements.add(WeakIvarBitmap);
// Create an instance of the structure
// This is now an externally visible symbol, so that we can speed up class
// messages in the next ABI. We may already have some weak references to
@@ -1713,13 +1689,13 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
std::string(Name));
llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
llvm::Constant *Class =
- MakeGlobal(ClassTy, Elements, CGM.getPointerAlign(), ClassSym,
- llvm::GlobalValue::ExternalLinkage);
+ Elements.finishAndCreateGlobal(ClassSym, CGM.getPointerAlign(), false,
+ llvm::GlobalValue::ExternalLinkage);
if (ClassRef) {
- ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
+ ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
ClassRef->getType()));
- ClassRef->removeFromParent();
- Class->setName(ClassSym);
+ ClassRef->removeFromParent();
+ Class->setName(ClassSym);
}
return Class;
}
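
The tail of this hunk keeps the existing forward-reference fixup, now applied
to the builder-created global. In outline, mirroring the code above:

  if (llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym)) {
    ClassRef->replaceAllUsesWith(
        llvm::ConstantExpr::getBitCast(Class, ClassRef->getType()));
    ClassRef->removeFromParent();   // discard the placeholder
    Class->setName(ClassSym);       // the definition adopts the canonical name
  }
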
@@ -1728,38 +1704,33 @@ llvm::Constant *CGObjCGNU::
GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
ArrayRef<llvm::Constant *> MethodTypes) {
// Get the method structure type.
- llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
- PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
- PtrToInt8Ty,
- nullptr);
- std::vector<llvm::Constant*> Methods;
+ llvm::StructType *ObjCMethodDescTy =
+ llvm::StructType::get(CGM.getLLVMContext(), { PtrToInt8Ty, PtrToInt8Ty });
+ ConstantInitBuilder Builder(CGM);
+ auto MethodList = Builder.beginStruct();
+ MethodList.addInt(IntTy, MethodNames.size());
+ auto Methods = MethodList.beginArray(ObjCMethodDescTy);
for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
- Methods.push_back(llvm::ConstantStruct::get(
- ObjCMethodDescTy, {MethodNames[i], MethodTypes[i]}));
- }
- llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
- MethodNames.size());
- llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
- Methods);
- llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
- IntTy, ObjCMethodArrayTy, nullptr);
- Methods.clear();
- Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
- Methods.push_back(Array);
- return MakeGlobal(ObjCMethodDescListTy, Methods, CGM.getPointerAlign(),
- ".objc_method_list");
+ auto Method = Methods.beginStruct(ObjCMethodDescTy);
+ Method.add(MethodNames[i]);
+ Method.add(MethodTypes[i]);
+ Method.finishAndAddTo(Methods);
+ }
+ Methods.finishAndAddTo(MethodList);
+ return MethodList.finishAndCreateGlobal(".objc_method_list",
+ CGM.getPointerAlign());
}
// Create the protocol list structure used in classes, categories and so on
-llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
- llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
- Protocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(
- PtrTy, //Should be a recurisve pointer, but it's always NULL here.
- SizeTy,
- ProtocolArrayTy,
- nullptr);
- std::vector<llvm::Constant*> Elements;
+llvm::Constant *
+CGObjCGNU::GenerateProtocolList(ArrayRef<std::string> Protocols) {
+
+ ConstantInitBuilder Builder(CGM);
+ auto ProtocolList = Builder.beginStruct();
+ ProtocolList.add(NULLPtr);
+ ProtocolList.addInt(LongTy, Protocols.size());
+
+ auto Elements = ProtocolList.beginArray(PtrToInt8Ty);
for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
iter != endIter ; iter++) {
llvm::Constant *protocol = nullptr;
@@ -1770,18 +1741,11 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
} else {
protocol = value->getValue();
}
- llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
- PtrToInt8Ty);
- Elements.push_back(Ptr);
- }
- llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
- Elements);
- Elements.clear();
- Elements.push_back(NULLPtr);
- Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
- Elements.push_back(ProtocolArray);
- return MakeGlobal(ProtocolListTy, Elements, CGM.getPointerAlign(),
- ".objc_protocol_list");
+ Elements.addBitCast(protocol, PtrToInt8Ty);
+ }
+ Elements.finishAndAddTo(ProtocolList);
+ return ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
+ CGM.getPointerAlign());
}
llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
@@ -1792,33 +1756,28 @@ llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}
-llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
- const std::string &ProtocolName) {
- SmallVector<std::string, 0> EmptyStringVector;
- SmallVector<llvm::Constant*, 0> EmptyConstantVector;
-
- llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
- llvm::Constant *MethodList =
- GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+llvm::Constant *
+CGObjCGNU::GenerateEmptyProtocol(const std::string &ProtocolName) {
+ llvm::Constant *ProtocolList = GenerateProtocolList({});
+ llvm::Constant *MethodList = GenerateProtocolMethodList({}, {});
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
- PtrToInt8Ty,
- ProtocolList->getType(),
- MethodList->getType(),
- MethodList->getType(),
- MethodList->getType(),
- MethodList->getType(),
- nullptr);
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
- llvm::Constant *Elements[] = {
- llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy),
- MakeConstantString(ProtocolName, ".objc_protocol_name"), ProtocolList,
- MethodList, MethodList, MethodList, MethodList};
- return MakeGlobal(ProtocolTy, Elements, CGM.getPointerAlign(),
- ".objc_protocol");
+ Elements.add(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+
+ Elements.add(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.add(ProtocolList);
+ Elements.add(MethodList);
+ Elements.add(MethodList);
+ Elements.add(MethodList);
+ Elements.add(MethodList);
+ return Elements.finishAndCreateGlobal(".objc_protocol",
+ CGM.getPointerAlign());
}
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
@@ -1837,8 +1796,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
for (const auto *I : PD->instance_methods()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I, TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
OptionalInstanceMethodNames.push_back(
MakeConstantString(I->getSelector().getAsString()));
@@ -1855,8 +1813,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
for (const auto *I : PD->class_methods()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
OptionalClassMethodNames.push_back(
MakeConstantString(I->getSelector().getAsString()));
@@ -1885,142 +1842,139 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// The isSynthesized value is always set to 0 in a protocol. It exists to
// simplify the runtime library by allowing it to use the same data
// structures for protocol metadata everywhere.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
- PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
- PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr);
- std::vector<llvm::Constant*> Properties;
- std::vector<llvm::Constant*> OptionalProperties;
- // Add all of the property methods need adding to the method list and to the
- // property metadata list.
- for (auto *property : PD->instance_properties()) {
- std::vector<llvm::Constant*> Fields;
+ llvm::Constant *PropertyList;
+ llvm::Constant *OptionalPropertyList;
+ {
+ llvm::StructType *propertyMetadataTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
+
+ unsigned numReqProperties = 0, numOptProperties = 0;
+ for (auto property : PD->instance_properties()) {
+ if (property->isOptional())
+ numOptProperties++;
+ else
+ numReqProperties++;
+ }
- Fields.push_back(MakePropertyEncodingString(property, nullptr));
- PushPropertyAttributes(Fields, property);
+ ConstantInitBuilder reqPropertyListBuilder(CGM);
+ auto reqPropertiesList = reqPropertyListBuilder.beginStruct();
+ reqPropertiesList.addInt(IntTy, numReqProperties);
+ reqPropertiesList.add(NULLPtr);
+ auto reqPropertiesArray = reqPropertiesList.beginArray(propertyMetadataTy);
+
+ ConstantInitBuilder optPropertyListBuilder(CGM);
+ auto optPropertiesList = optPropertyListBuilder.beginStruct();
+ optPropertiesList.addInt(IntTy, numOptProperties);
+ optPropertiesList.add(NULLPtr);
+ auto optPropertiesArray = optPropertiesList.beginArray(propertyMetadataTy);
+
+ // Add all of the property methods that need adding to the method list
+ // and to the property metadata list.
+ for (auto *property : PD->instance_properties()) {
+ auto &propertiesArray =
+ (property->isOptional() ? optPropertiesArray : reqPropertiesArray);
+ auto fields = propertiesArray.beginStruct(propertyMetadataTy);
+
+ fields.add(MakePropertyEncodingString(property, nullptr));
+ PushPropertyAttributes(fields, property);
+
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string typeStr = Context.getObjCEncodingForMethodDecl(getter);
+ llvm::Constant *typeEncoding = MakeConstantString(typeStr);
+ InstanceMethodTypes.push_back(typeEncoding);
+ fields.add(MakeConstantString(getter->getSelector().getAsString()));
+ fields.add(typeEncoding);
+ } else {
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string typeStr = Context.getObjCEncodingForMethodDecl(setter);
+ llvm::Constant *typeEncoding = MakeConstantString(typeStr);
+ InstanceMethodTypes.push_back(typeEncoding);
+ fields.add(MakeConstantString(setter->getSelector().getAsString()));
+ fields.add(typeEncoding);
+ } else {
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
+ }
- if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(getter,TypeStr);
- llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- InstanceMethodTypes.push_back(TypeEncoding);
- Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
- } else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
- }
- if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(setter,TypeStr);
- llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- InstanceMethodTypes.push_back(TypeEncoding);
- Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
- } else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
- }
- if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
- OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
- } else {
- Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ fields.finishAndAddTo(propertiesArray);
}
+
+ reqPropertiesArray.finishAndAddTo(reqPropertiesList);
+ PropertyList =
+ reqPropertiesList.finishAndCreateGlobal(".objc_property_list",
+ CGM.getPointerAlign());
+
+ optPropertiesArray.finishAndAddTo(optPropertiesList);
+ OptionalPropertyList =
+ optPropertiesList.finishAndCreateGlobal(".objc_property_list",
+ CGM.getPointerAlign());
}
- llvm::Constant *PropertyArray = llvm::ConstantArray::get(
- llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
- llvm::Constant* PropertyListInitFields[] =
- {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
-
- llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::getAnon(PropertyListInitFields);
- llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
- PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
- PropertyListInit, ".objc_property_list");
-
- llvm::Constant *OptionalPropertyArray =
- llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
- OptionalProperties.size()) , OptionalProperties);
- llvm::Constant* OptionalPropertyListInitFields[] = {
- llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
- OptionalPropertyArray };
-
- llvm::Constant *OptionalPropertyListInit =
- llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields);
- llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
- OptionalPropertyListInit->getType(), false,
- llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
- ".objc_property_list");
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
- PtrToInt8Ty,
- ProtocolList->getType(),
- InstanceMethodList->getType(),
- ClassMethodList->getType(),
- OptionalInstanceMethodList->getType(),
- OptionalClassMethodList->getType(),
- PropertyList->getType(),
- OptionalPropertyList->getType(),
- nullptr);
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
- llvm::Constant *Elements[] = {
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+ Elements.add(
llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy),
- MakeConstantString(ProtocolName, ".objc_protocol_name"), ProtocolList,
- InstanceMethodList, ClassMethodList, OptionalInstanceMethodList,
- OptionalClassMethodList, PropertyList, OptionalPropertyList};
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.add(
+ MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.add(ProtocolList);
+ Elements.add(InstanceMethodList);
+ Elements.add(ClassMethodList);
+ Elements.add(OptionalInstanceMethodList);
+ Elements.add(OptionalClassMethodList);
+ Elements.add(PropertyList);
+ Elements.add(OptionalPropertyList);
ExistingProtocols[ProtocolName] =
- llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
- CGM.getPointerAlign(), ".objc_protocol"), IdTy);
+ llvm::ConstantExpr::getBitCast(
+ Elements.finishAndCreateGlobal(".objc_protocol", CGM.getPointerAlign()),
+ IdTy);
}
void CGObjCGNU::GenerateProtocolHolderCategory() {
// Collect information about instance methods
SmallVector<Selector, 1> MethodSels;
SmallVector<llvm::Constant*, 1> MethodTypes;
- std::vector<llvm::Constant*> Elements;
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+
const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
const std::string CategoryName = "AnotherHack";
- Elements.push_back(MakeConstantString(CategoryName));
- Elements.push_back(MakeConstantString(ClassName));
+ Elements.add(MakeConstantString(CategoryName));
+ Elements.add(MakeConstantString(ClassName));
// Instance method list
- Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
- ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ Elements.addBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy);
// Class method list
- Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
- ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ Elements.addBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy);
+
// Protocol list
- llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
- ExistingProtocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(
- PtrTy, //Should be a recurisve pointer, but it's always NULL here.
- SizeTy,
- ProtocolArrayTy,
- nullptr);
- std::vector<llvm::Constant*> ProtocolElements;
- for (llvm::StringMapIterator<llvm::Constant*> iter =
- ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ ConstantInitBuilder ProtocolListBuilder(CGM);
+ auto ProtocolList = ProtocolListBuilder.beginStruct();
+ ProtocolList.add(NULLPtr);
+ ProtocolList.addInt(LongTy, ExistingProtocols.size());
+ auto ProtocolElements = ProtocolList.beginArray(PtrTy);
+ for (auto iter = ExistingProtocols.begin(), endIter = ExistingProtocols.end();
iter != endIter ; iter++) {
- llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
- PtrTy);
- ProtocolElements.push_back(Ptr);
- }
- llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
- ProtocolElements);
- ProtocolElements.clear();
- ProtocolElements.push_back(NULLPtr);
- ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
- ExistingProtocols.size()));
- ProtocolElements.push_back(ProtocolArray);
- Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
- ProtocolElements, CGM.getPointerAlign(),
- ".objc_protocol_list"), PtrTy));
+ ProtocolElements.addBitCast(iter->getValue(), PtrTy);
+ }
+ ProtocolElements.finishAndAddTo(ProtocolList);
+ Elements.addBitCast(
+ ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
+ CGM.getPointerAlign()),
+ PtrTy);
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ Elements.finishAndCreateGlobal("", CGM.getPointerAlign()),
PtrTy));
}
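
The holder category above also illustrates composing two independent builders:
the protocol list is finished into its own global, whose address is then
embedded, bitcast to an opaque pointer, as a field of the category structure.
A compressed sketch (counts and names are placeholders):

  ConstantInitBuilder ListBuilder(CGM);
  auto ProtocolList = ListBuilder.beginStruct();
  ProtocolList.add(NULLPtr);                    // 'next' pointer, always null here
  ProtocolList.addInt(LongTy, 0);               // protocol count (placeholder)
  llvm::Constant *ListGV =
      ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
                                         CGM.getPointerAlign());

  ConstantInitBuilder CatBuilder(CGM);
  auto Category = CatBuilder.beginStruct();
  Category.addBitCast(ListGV, PtrTy);           // embed as an opaque pointer
  Category.finishAndCreateGlobal("", CGM.getPointerAlign());
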
@@ -2055,13 +2009,16 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
}
values.push_back(llvm::ConstantInt::get(Int32Ty, word));
}
- llvm::ArrayType *arrayTy = llvm::ArrayType::get(Int32Ty, values.size());
- llvm::Constant *array = llvm::ConstantArray::get(arrayTy, values);
- llvm::Constant *fields[2] = {
- llvm::ConstantInt::get(Int32Ty, values.size()),
- array };
- llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
- nullptr), fields, CharUnits::fromQuantity(4));
+
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct();
+ fields.addInt(Int32Ty, values.size());
+ auto array = fields.beginArray();
+ for (auto v : values) array.add(v);
+ array.finishAndAddTo(fields);
+
+ llvm::Constant *GS =
+ fields.finishAndCreateGlobal("", CharUnits::fromQuantity(4));
llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
return ptr;
}
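
MakeBitField packs one flag per ivar into 32-bit words before emitting the
{count, words} structure above. The packing step, sketched under the
assumption of least-significant-bit-first order within each word:

  SmallVector<uint32_t, 8> words((bits.size() + 31) / 32, 0);
  for (size_t i = 0, e = bits.size(); i != e; ++i)
    if (bits[i])
      words[i / 32] |= 1u << (i % 32);   // bit i set => ivar i is marked
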
@@ -2074,8 +2031,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
for (const auto *I : OCD->instance_methods()) {
InstanceMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = CGM.getContext().getObjCEncodingForMethodDecl(I);
InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
@@ -2084,8 +2040,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
SmallVector<llvm::Constant*, 16> ClassMethodTypes;
for (const auto *I : OCD->class_methods()) {
ClassMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = CGM.getContext().getObjCEncodingForMethodDecl(I);
ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
@@ -2097,23 +2052,24 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
E = Protos.end(); I != E; ++I)
Protocols.push_back((*I)->getNameAsString());
- llvm::Constant *Elements[] = {
- MakeConstantString(CategoryName), MakeConstantString(ClassName),
- // Instance method list
- llvm::ConstantExpr::getBitCast(
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+ Elements.add(MakeConstantString(CategoryName));
+ Elements.add(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.addBitCast(
GenerateMethodList(ClassName, CategoryName, InstanceMethodSels,
InstanceMethodTypes, false),
- PtrTy),
- // Class method list
- llvm::ConstantExpr::getBitCast(GenerateMethodList(ClassName, CategoryName,
- ClassMethodSels,
- ClassMethodTypes, true),
- PtrTy),
- // Protocol list
- llvm::ConstantExpr::getBitCast(GenerateProtocolList(Protocols), PtrTy)};
+ PtrTy);
+ // Class method list
+ Elements.addBitCast(
+ GenerateMethodList(ClassName, CategoryName, ClassMethodSels,
+ ClassMethodTypes, true),
+ PtrTy);
+ // Protocol list
+ Elements.addBitCast(GenerateProtocolList(Protocols), PtrTy);
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ Elements.finishAndCreateGlobal("", CGM.getPointerAlign()),
PtrTy));
}
@@ -2123,65 +2079,67 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
ASTContext &Context = CGM.getContext();
// Property metadata: name, attributes, attributes2, padding1, padding2,
// setter name, setter types, getter name, getter types.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
- PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
- PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr);
- std::vector<llvm::Constant*> Properties;
+ llvm::StructType *propertyMetadataTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
+
+ unsigned numProperties = 0;
+ for (auto *propertyImpl : OID->property_impls()) {
+ (void) propertyImpl;
+ numProperties++;
+ }
+
+ ConstantInitBuilder builder(CGM);
+ auto propertyList = builder.beginStruct();
+ propertyList.addInt(IntTy, numProperties);
+ propertyList.add(NULLPtr);
+ auto properties = propertyList.beginArray(propertyMetadataTy);
// Add all of the property methods that need adding to the method list
// and to the property metadata list.
for (auto *propertyImpl : OID->property_impls()) {
- std::vector<llvm::Constant*> Fields;
+ auto fields = properties.beginStruct(propertyMetadataTy);
ObjCPropertyDecl *property = propertyImpl->getPropertyDecl();
bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Synthesize);
bool isDynamic = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Dynamic);
- Fields.push_back(MakePropertyEncodingString(property, OID));
- PushPropertyAttributes(Fields, property, isSynthesized, isDynamic);
+ fields.add(MakePropertyEncodingString(property, OID));
+ PushPropertyAttributes(fields, property, isSynthesized, isDynamic);
if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(getter);
llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
if (isSynthesized) {
InstanceMethodTypes.push_back(TypeEncoding);
InstanceMethodSels.push_back(getter->getSelector());
}
- Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
+ fields.add(MakeConstantString(getter->getSelector().getAsString()));
+ fields.add(TypeEncoding);
} else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
}
if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(setter);
llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
if (isSynthesized) {
InstanceMethodTypes.push_back(TypeEncoding);
InstanceMethodSels.push_back(setter->getSelector());
}
- Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
+ fields.add(MakeConstantString(setter->getSelector().getAsString()));
+ fields.add(TypeEncoding);
} else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
}
- Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
- }
- llvm::ArrayType *PropertyArrayTy =
- llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
- llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
- Properties);
- llvm::Constant* PropertyListInitFields[] =
- {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
-
- llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::getAnon(PropertyListInitFields);
- return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
- llvm::GlobalValue::InternalLinkage, PropertyListInit,
- ".objc_property_list");
+ fields.finishAndAddTo(properties);
+ }
+ properties.finishAndAddTo(propertyList);
+
+ return propertyList.finishAndCreateGlobal(".objc_property_list",
+ CGM.getPointerAlign());
}
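
The counting loop above exists because the builder wants the element count
emitted before the array itself; assuming property_impls() is a standard
forward range, the same count could be taken with std::distance:

  unsigned numProperties =
      std::distance(OID->property_impls().begin(), OID->property_impls().end());
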
void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {
@@ -2230,7 +2188,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
SmallVector<llvm::Constant*, 16> IvarTypes;
SmallVector<llvm::Constant*, 16> IvarOffsets;
- std::vector<llvm::Constant*> IvarOffsetValues;
+ ConstantInitBuilder IvarOffsetBuilder(CGM);
+ auto IvarOffsetValues = IvarOffsetBuilder.beginArray(PtrToIntTy);
SmallVector<bool, 16> WeakIvars;
SmallVector<bool, 16> StrongIvars;
@@ -2274,7 +2233,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
"__objc_ivar_offset_value_" + ClassName +"." +
IVD->getNameAsString());
IvarOffsets.push_back(OffsetValue);
- IvarOffsetValues.push_back(OffsetVar);
+ IvarOffsetValues.add(OffsetVar);
Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime();
switch (lt) {
case Qualifiers::OCL_Strong:
@@ -2293,16 +2252,15 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
llvm::GlobalVariable *IvarOffsetArray =
- MakeGlobalArray(PtrToIntTy, IvarOffsetValues, CGM.getPointerAlign(),
- ".ivar.offsets");
+ IvarOffsetValues.finishAndCreateGlobal(".ivar.offsets",
+ CGM.getPointerAlign());
// Collect information about instance methods
SmallVector<Selector, 16> InstanceMethodSels;
SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
for (const auto *I : OID->instance_methods()) {
InstanceMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
@@ -2314,8 +2272,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
SmallVector<llvm::Constant*, 16> ClassMethodTypes;
for (const auto *I : OID->class_methods()) {
ClassMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
// Collect the names of referenced protocols
@@ -2439,170 +2396,180 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Add all referenced protocols to a category.
GenerateProtocolHolderCategory();
- llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
- SelectorTy->getElementType());
- llvm::Type *SelStructPtrTy = SelectorTy;
- if (!SelStructTy) {
- SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr);
- SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ llvm::StructType *selStructTy =
+ dyn_cast<llvm::StructType>(SelectorTy->getElementType());
+ llvm::Type *selStructPtrTy = SelectorTy;
+ if (!selStructTy) {
+ selStructTy = llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, PtrToInt8Ty });
+ selStructPtrTy = llvm::PointerType::getUnqual(selStructTy);
}
- std::vector<llvm::Constant*> Elements;
- llvm::Constant *Statics = NULLPtr;
// Generate statics list:
+ llvm::Constant *statics = NULLPtr;
if (!ConstantStrings.empty()) {
- llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
- ConstantStrings.size() + 1);
- ConstantStrings.push_back(NULLPtr);
-
- StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
-
- if (StringClass.empty()) StringClass = "NXConstantString";
-
- Elements.push_back(MakeConstantString(StringClass,
- ".objc_static_class_name"));
- Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
- ConstantStrings));
- llvm::StructType *StaticsListTy =
- llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, nullptr);
- llvm::Type *StaticsListPtrTy =
- llvm::PointerType::getUnqual(StaticsListTy);
- Statics = MakeGlobal(StaticsListTy, Elements, CGM.getPointerAlign(),
- ".objc_statics");
- llvm::ArrayType *StaticsListArrayTy =
- llvm::ArrayType::get(StaticsListPtrTy, 2);
- Elements.clear();
- Elements.push_back(Statics);
- Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
- Statics = MakeGlobal(StaticsListArrayTy, Elements,
- CGM.getPointerAlign(), ".objc_statics_ptr");
- Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
- }
- // Array of classes, categories, and constant objects
- llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
- Classes.size() + Categories.size() + 2);
- llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
- llvm::Type::getInt16Ty(VMContext),
- llvm::Type::getInt16Ty(VMContext),
- ClassListTy, nullptr);
-
- Elements.clear();
- // Pointer to an array of selectors used in this module.
- std::vector<llvm::Constant*> Selectors;
- std::vector<llvm::GlobalAlias*> SelectorAliases;
- for (SelectorMap::iterator iter = SelectorTable.begin(),
- iterEnd = SelectorTable.end(); iter != iterEnd ; ++iter) {
+ llvm::GlobalVariable *fileStatics = [&] {
+ ConstantInitBuilder builder(CGM);
+ auto staticsStruct = builder.beginStruct();
- std::string SelNameStr = iter->first.getAsString();
- llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name");
+ StringRef stringClass = CGM.getLangOpts().ObjCConstantStringClass;
+ if (stringClass.empty()) stringClass = "NXConstantString";
+ staticsStruct.add(MakeConstantString(stringClass,
+ ".objc_static_class_name"));
- SmallVectorImpl<TypedSelector> &Types = iter->second;
- for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
- e = Types.end() ; i!=e ; i++) {
+ auto array = staticsStruct.beginArray();
+ array.addAll(ConstantStrings);
+ array.add(NULLPtr);
+ array.finishAndAddTo(staticsStruct);
- llvm::Constant *SelectorTypeEncoding = NULLPtr;
- if (!i->first.empty())
- SelectorTypeEncoding = MakeConstantString(i->first, ".objc_sel_types");
+ return staticsStruct.finishAndCreateGlobal(".objc_statics",
+ CGM.getPointerAlign());
+ }();
- Elements.push_back(SelName);
- Elements.push_back(SelectorTypeEncoding);
- Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
- Elements.clear();
+ ConstantInitBuilder builder(CGM);
+ auto allStaticsArray = builder.beginArray(fileStatics->getType());
+ allStaticsArray.add(fileStatics);
+ allStaticsArray.addNullPointer(fileStatics->getType());
- // Store the selector alias for later replacement
- SelectorAliases.push_back(i->second);
- }
+ statics = allStaticsArray.finishAndCreateGlobal(".objc_statics_ptr",
+ CGM.getPointerAlign());
+ statics = llvm::ConstantExpr::getBitCast(statics, PtrTy);
}
- unsigned SelectorCount = Selectors.size();
- // NULL-terminate the selector list. This should not actually be required,
- // because the selector list has a length field. Unfortunately, the GCC
- // runtime decides to ignore the length field and expects a NULL terminator,
- // and GCC cooperates with this by always setting the length to 0.
- Elements.push_back(NULLPtr);
- Elements.push_back(NULLPtr);
- Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
- Elements.clear();
-
- // Number of static selectors
- Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
- llvm::GlobalVariable *SelectorList =
- MakeGlobalArray(SelStructTy, Selectors, CGM.getPointerAlign(),
- ".objc_selector_list");
- Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
- SelStructPtrTy));
- // Now that all of the static selectors exist, create pointers to them.
- for (unsigned int i=0 ; i<SelectorCount ; i++) {
+ // Array of classes, categories, and constant objects.
+
+ SmallVector<llvm::GlobalAlias*, 16> selectorAliases;
+ unsigned selectorCount;
+
+ // Pointer to an array of selectors used in this module.
+ llvm::GlobalVariable *selectorList = [&] {
+ ConstantInitBuilder builder(CGM);
+ auto selectors = builder.beginArray(selStructTy);
+ auto &table = SelectorTable; // MSVC workaround
+ for (auto &entry : table) {
+
+ std::string selNameStr = entry.first.getAsString();
+ llvm::Constant *selName = ExportUniqueString(selNameStr, ".objc_sel_name");
+
+ for (TypedSelector &sel : entry.second) {
+ llvm::Constant *selectorTypeEncoding = NULLPtr;
+ if (!sel.first.empty())
+ selectorTypeEncoding =
+ MakeConstantString(sel.first, ".objc_sel_types");
+
+ auto selStruct = selectors.beginStruct(selStructTy);
+ selStruct.add(selName);
+ selStruct.add(selectorTypeEncoding);
+ selStruct.finishAndAddTo(selectors);
+
+ // Store the selector alias for later replacement
+ selectorAliases.push_back(sel.second);
+ }
+ }
+
+ // Remember the number of entries in the selector table.
+ selectorCount = selectors.size();
- llvm::Constant *Idxs[] = {Zeros[0],
- llvm::ConstantInt::get(Int32Ty, i), Zeros[0]};
+ // NULL-terminate the selector list. This should not actually be required,
+ // because the selector list has a length field. Unfortunately, the GCC
+ // runtime decides to ignore the length field and expects a NULL terminator,
+ // and GCC cooperates with this by always setting the length to 0.
+ auto selStruct = selectors.beginStruct(selStructTy);
+ selStruct.add(NULLPtr);
+ selStruct.add(NULLPtr);
+ selStruct.finishAndAddTo(selectors);
+
+ return selectors.finishAndCreateGlobal(".objc_selector_list",
+ CGM.getPointerAlign());
+ }();
+
+ // Now that all of the static selectors exist, create pointers to them.
+ for (unsigned i = 0; i < selectorCount; ++i) {
+ llvm::Constant *idxs[] = {
+ Zeros[0],
+ llvm::ConstantInt::get(Int32Ty, i)
+ };
// FIXME: We're generating redundant loads and stores here!
- llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(
- SelectorList->getValueType(), SelectorList, makeArrayRef(Idxs, 2));
+ llvm::Constant *selPtr = llvm::ConstantExpr::getGetElementPtr(
+ selectorList->getValueType(), selectorList, idxs);
// If selectors are defined as an opaque type, cast the pointer to this
// type.
- SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy);
- SelectorAliases[i]->replaceAllUsesWith(SelPtr);
- SelectorAliases[i]->eraseFromParent();
- }
-
- // Number of classes defined.
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
- Classes.size()));
- // Number of categories defined
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
- Categories.size()));
- // Create an array of classes, then categories, then static object instances
- Classes.insert(Classes.end(), Categories.begin(), Categories.end());
- // NULL-terminated list of static object instances (mainly constant strings)
- Classes.push_back(Statics);
- Classes.push_back(NULLPtr);
- llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
- Elements.push_back(ClassList);
- // Construct the symbol table
- llvm::Constant *SymTab =
- MakeGlobal(SymTabTy, Elements, CGM.getPointerAlign());
+ selPtr = llvm::ConstantExpr::getBitCast(selPtr, SelectorTy);
+ selectorAliases[i]->replaceAllUsesWith(selPtr);
+ selectorAliases[i]->eraseFromParent();
+ }
+
+ llvm::GlobalVariable *symtab = [&] {
+ ConstantInitBuilder builder(CGM);
+ auto symtab = builder.beginStruct();
+
+ // Number of static selectors
+ symtab.addInt(LongTy, selectorCount);
+
+ symtab.addBitCast(selectorList, selStructPtrTy);
+
+ // Number of classes defined.
+ symtab.addInt(CGM.Int16Ty, Classes.size());
+ // Number of categories defined
+ symtab.addInt(CGM.Int16Ty, Categories.size());
+
+ // Create an array of classes, then categories, then static object instances
+ auto classList = symtab.beginArray(PtrToInt8Ty);
+ classList.addAll(Classes);
+ classList.addAll(Categories);
+ // NULL-terminated list of static object instances (mainly constant strings)
+ classList.add(statics);
+ classList.add(NULLPtr);
+ classList.finishAndAddTo(symtab);
+
+ // Construct the symbol table.
+ return symtab.finishAndCreateGlobal("", CGM.getPointerAlign());
+ }();
// The symbol table is contained in a module which has some version-checking
// constants
- llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
- PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
- (RuntimeVersion >= 10) ? IntTy : nullptr, nullptr);
- Elements.clear();
- // Runtime version, used for ABI compatibility checking.
- Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
- // sizeof(ModuleTy)
- llvm::DataLayout td(&TheModule);
- Elements.push_back(
- llvm::ConstantInt::get(LongTy,
- td.getTypeSizeInBits(ModuleTy) /
- CGM.getContext().getCharWidth()));
-
- // The path to the source file where this module was declared
- SourceManager &SM = CGM.getContext().getSourceManager();
- const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
- std::string path =
- std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName();
- Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
- Elements.push_back(SymTab);
-
- if (RuntimeVersion >= 10)
- switch (CGM.getLangOpts().getGC()) {
+ llvm::Constant *module = [&] {
+ llvm::Type *moduleEltTys[] = {
+ LongTy, LongTy, PtrToInt8Ty, symtab->getType(), IntTy
+ };
+ llvm::StructType *moduleTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ makeArrayRef(moduleEltTys).drop_back(unsigned(RuntimeVersion < 10)));
+
+ ConstantInitBuilder builder(CGM);
+ auto module = builder.beginStruct(moduleTy);
+ // Runtime version, used for ABI compatibility checking.
+ module.addInt(LongTy, RuntimeVersion);
+ // sizeof(ModuleTy)
+ module.addInt(LongTy, CGM.getDataLayout().getTypeStoreSize(moduleTy));
+
+ // The path to the source file where this module was declared
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ std::string path =
+ (Twine(mainFile->getDir()->getName()) + "/" + mainFile->getName()).str();
+ module.add(MakeConstantString(path, ".objc_source_file_name"));
+ module.add(symtab);
+
+ if (RuntimeVersion >= 10) {
+ switch (CGM.getLangOpts().getGC()) {
case LangOptions::GCOnly:
- Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
+ module.addInt(IntTy, 2);
break;
case LangOptions::NonGC:
if (CGM.getLangOpts().ObjCAutoRefCount)
- Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ module.addInt(IntTy, 1);
else
- Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
+ module.addInt(IntTy, 0);
break;
case LangOptions::HybridGC:
- Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ module.addInt(IntTy, 1);
break;
+ }
}
- llvm::Value *Module = MakeGlobal(ModuleTy, Elements, CGM.getPointerAlign());
+ return module.finishAndCreateGlobal("", CGM.getPointerAlign());
+ }();
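
ModuleInitFunction now wraps each builder in an immediately-invoked lambda, so
every table is produced by a single expression and no builder can leak out of
its scope; the module struct also drops its trailing GC field for old runtimes
via drop_back. A condensed sketch of both idioms (field values are
placeholders):

  llvm::GlobalVariable *table = [&] {
    ConstantInitBuilder builder(CGM);
    auto fields = builder.beginStruct();
    fields.addInt(LongTy, 0);                   // placeholder field
    return fields.finishAndCreateGlobal("", CGM.getPointerAlign());
  }();

  // Optional trailing field: build the full element list, then drop the
  // last entry from the ArrayRef when it does not apply.
  llvm::Type *eltTys[] = { LongTy, LongTy, PtrToInt8Ty, table->getType(), IntTy };
  llvm::StructType *moduleTy = llvm::StructType::get(
      CGM.getLLVMContext(),
      makeArrayRef(eltTys).drop_back(unsigned(RuntimeVersion < 10)));
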
// Create the load function calling the runtime entry point with the module
// structure
@@ -2616,10 +2583,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Builder.SetInsertPoint(EntryBB);
llvm::FunctionType *FT =
- llvm::FunctionType::get(Builder.getVoidTy(),
- llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::FunctionType::get(Builder.getVoidTy(), module->getType(), true);
llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
- Builder.CreateCall(Register, Module);
+ Builder.CreateCall(Register, module);
if (!ClassAliases.empty()) {
llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty};
@@ -2646,8 +2612,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
iter != ClassAliases.end(); ++iter) {
llvm::Constant *TheClass =
- TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(),
- true);
+ TheModule.getGlobalVariable("_OBJC_CLASS_" + iter->first, true);
if (TheClass) {
TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
Builder.CreateCall(RegisterAlias,
@@ -2910,9 +2875,11 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
if (RuntimeVersion < 10 ||
CGF.CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment())
return CGF.Builder.CreateZExtOrBitCast(
- CGF.Builder.CreateDefaultAlignedLoad(CGF.Builder.CreateAlignedLoad(
- ObjCIvarOffsetVariable(Interface, Ivar),
- CGF.getPointerAlign(), "ivar")),
+ CGF.Builder.CreateAlignedLoad(
+ Int32Ty, CGF.Builder.CreateAlignedLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar),
+ CGF.getPointerAlign(), "ivar"),
+ CharUnits::fromQuantity(4)),
PtrDiffTy);
std::string name = "__objc_ivar_offset_value_" +
Interface->getNameAsString() +"." + Ivar->getNameAsString();
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 5ab9fc4f9710..7219592fffcd 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -11,12 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "CGObjCRuntime.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
+#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -25,6 +26,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -172,18 +174,18 @@ protected:
CodeGen::CodeGenModule &CGM;
public:
- llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
- llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
+ llvm::IntegerType *ShortTy, *IntTy, *LongTy;
+ llvm::PointerType *Int8PtrTy, *Int8PtrPtrTy;
llvm::Type *IvarOffsetVarTy;
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
- llvm::Type *ObjectPtrTy;
+ llvm::PointerType *ObjectPtrTy;
/// PtrObjectPtrTy - LLVM type for id *
- llvm::Type *PtrObjectPtrTy;
+ llvm::PointerType *PtrObjectPtrTy;
/// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
- llvm::Type *SelectorPtrTy;
+ llvm::PointerType *SelectorPtrTy;
private:
/// ProtocolPtrTy - LLVM type for external protocol handles
@@ -212,7 +214,7 @@ public:
/// SuperTy - LLVM type for struct objc_super.
llvm::StructType *SuperTy;
/// SuperPtrTy - LLVM type for struct objc_super *.
- llvm::Type *SuperPtrTy;
+ llvm::PointerType *SuperPtrTy;
/// PropertyTy - LLVM type for struct objc_property (struct _prop_t
/// in GCC parlance).
@@ -222,7 +224,7 @@ public:
/// (_prop_list_t in GCC parlance).
llvm::StructType *PropertyListTy;
/// PropertyListPtrTy - LLVM type for struct objc_property_list*.
- llvm::Type *PropertyListPtrTy;
+ llvm::PointerType *PropertyListPtrTy;
// MethodTy - LLVM type for struct objc_method.
llvm::StructType *MethodTy;
@@ -230,7 +232,7 @@ public:
/// CacheTy - LLVM type for struct objc_cache.
llvm::Type *CacheTy;
/// CachePtrTy - LLVM type for struct objc_cache *.
- llvm::Type *CachePtrTy;
+ llvm::PointerType *CachePtrTy;
llvm::Constant *getGetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -500,20 +502,20 @@ public:
/// SymtabTy - LLVM type for struct objc_symtab.
llvm::StructType *SymtabTy;
/// SymtabPtrTy - LLVM type for struct objc_symtab *.
- llvm::Type *SymtabPtrTy;
+ llvm::PointerType *SymtabPtrTy;
/// ModuleTy - LLVM type for struct objc_module.
llvm::StructType *ModuleTy;
/// ProtocolTy - LLVM type for struct objc_protocol.
llvm::StructType *ProtocolTy;
/// ProtocolPtrTy - LLVM type for struct objc_protocol *.
- llvm::Type *ProtocolPtrTy;
+ llvm::PointerType *ProtocolPtrTy;
/// ProtocolExtensionTy - LLVM type for struct
/// objc_protocol_extension.
llvm::StructType *ProtocolExtensionTy;
/// ProtocolExtensionTy - LLVM type for struct
/// objc_protocol_extension *.
- llvm::Type *ProtocolExtensionPtrTy;
+ llvm::PointerType *ProtocolExtensionPtrTy;
/// MethodDescriptionTy - LLVM type for struct
/// objc_method_description.
llvm::StructType *MethodDescriptionTy;
@@ -522,34 +524,34 @@ public:
llvm::StructType *MethodDescriptionListTy;
/// MethodDescriptionListPtrTy - LLVM type for struct
/// objc_method_description_list *.
- llvm::Type *MethodDescriptionListPtrTy;
+ llvm::PointerType *MethodDescriptionListPtrTy;
/// ProtocolListTy - LLVM type for struct objc_property_list.
llvm::StructType *ProtocolListTy;
/// ProtocolListPtrTy - LLVM type for struct objc_property_list*.
- llvm::Type *ProtocolListPtrTy;
+ llvm::PointerType *ProtocolListPtrTy;
/// CategoryTy - LLVM type for struct objc_category.
llvm::StructType *CategoryTy;
/// ClassTy - LLVM type for struct objc_class.
llvm::StructType *ClassTy;
/// ClassPtrTy - LLVM type for struct objc_class *.
- llvm::Type *ClassPtrTy;
+ llvm::PointerType *ClassPtrTy;
/// ClassExtensionTy - LLVM type for struct objc_class_ext.
llvm::StructType *ClassExtensionTy;
/// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
- llvm::Type *ClassExtensionPtrTy;
+ llvm::PointerType *ClassExtensionPtrTy;
// IvarTy - LLVM type for struct objc_ivar.
llvm::StructType *IvarTy;
/// IvarListTy - LLVM type for struct objc_ivar_list.
- llvm::Type *IvarListTy;
+ llvm::StructType *IvarListTy;
/// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
- llvm::Type *IvarListPtrTy;
+ llvm::PointerType *IvarListPtrTy;
/// MethodListTy - LLVM type for struct objc_method_list.
- llvm::Type *MethodListTy;
+ llvm::StructType *MethodListTy;
/// MethodListPtrTy - LLVM type for struct objc_method_list *.
- llvm::Type *MethodListPtrTy;
+ llvm::PointerType *MethodListPtrTy;
/// ExceptionDataTy - LLVM type for struct _objc_exception_data.
- llvm::Type *ExceptionDataTy;
+ llvm::StructType *ExceptionDataTy;
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::Constant *getExceptionTryEnterFn() {
@@ -608,25 +610,25 @@ public:
llvm::StructType *MethodListnfABITy;
// MethodListnfABIPtrTy - LLVM for struct _method_list_t*
- llvm::Type *MethodListnfABIPtrTy;
+ llvm::PointerType *MethodListnfABIPtrTy;
// ProtocolnfABITy = LLVM for struct _protocol_t
llvm::StructType *ProtocolnfABITy;
// ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
- llvm::Type *ProtocolnfABIPtrTy;
+ llvm::PointerType *ProtocolnfABIPtrTy;
// ProtocolListnfABITy - LLVM for struct _objc_protocol_list
llvm::StructType *ProtocolListnfABITy;
// ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
- llvm::Type *ProtocolListnfABIPtrTy;
+ llvm::PointerType *ProtocolListnfABIPtrTy;
// ClassnfABITy - LLVM for struct _class_t
llvm::StructType *ClassnfABITy;
// ClassnfABIPtrTy - LLVM for struct _class_t*
- llvm::Type *ClassnfABIPtrTy;
+ llvm::PointerType *ClassnfABIPtrTy;
// IvarnfABITy - LLVM for struct _ivar_t
llvm::StructType *IvarnfABITy;
@@ -635,13 +637,13 @@ public:
llvm::StructType *IvarListnfABITy;
 // IvarListnfABIPtrTy - LLVM type for struct _ivar_list_t*
- llvm::Type *IvarListnfABIPtrTy;
+ llvm::PointerType *IvarListnfABIPtrTy;
 // ClassRonfABITy - LLVM type for struct _class_ro_t
llvm::StructType *ClassRonfABITy;
 // ImpnfABITy - LLVM type for id (*)(id, SEL, ...)
- llvm::Type *ImpnfABITy;
+ llvm::PointerType *ImpnfABITy;
 // CategorynfABITy - LLVM type for struct _category_t
llvm::StructType *CategorynfABITy;
@@ -670,7 +672,7 @@ public:
llvm::StructType *SuperMessageRefTy;
 // SuperMessageRefPtrTy - LLVM type for struct _super_message_ref_t*
- llvm::Type *SuperMessageRefPtrTy;
+ llvm::PointerType *SuperMessageRefPtrTy;
llvm::Constant *getMessageSendFixupFn() {
// id objc_msgSend_fixup(id, struct message_ref_t*, ...)
@@ -733,6 +735,13 @@ public:
ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
};
+enum class ObjCLabelType {
+ ClassName,
+ MethodVarName,
+ MethodVarType,
+ PropertyName,
+};
+
class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
public:
class SKIP_SCAN {
@@ -836,7 +845,7 @@ protected:
llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
 /// DefinedCategoryNames - list of category names in the form Class_Category.
- llvm::SmallSetVector<std::string, 16> DefinedCategoryNames;
+ llvm::SmallSetVector<llvm::CachedHashString, 16> DefinedCategoryNames;
/// MethodVarTypes - uniqued method type signatures. We have to use
 /// a StringMap here because we have no other unique reference.
@@ -879,6 +888,15 @@ protected:
/// DefinedNonLazyCategories - List of defined "non-lazy" categories.
SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
+ /// Cached reference to the class for constant strings. This value has type
+ /// int * but is actually an Obj-C class pointer.
+ llvm::WeakVH ConstantStringClassRef;
+
+ /// \brief The LLVM type corresponding to NSConstantString.
+ llvm::StructType *NSConstantStringType = nullptr;
+
+ llvm::StringMap<llvm::GlobalVariable *> NSConstantStringMap;
+
/// GetNameForMethod - Return a name for the given method.
/// \param[out] NameOut - The return value.
void GetNameForMethod(const ObjCMethodDecl *OMD,
@@ -979,15 +997,6 @@ protected:
ArrayRef<llvm::Constant*> MethodTypes,
const ObjCCommonTypesHelper &ObjCTypes);
- /// PushProtocolProperties - Push protocol's property on the input stack.
- void PushProtocolProperties(
- llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
- SmallVectorImpl<llvm::Constant*> &Properties,
- const Decl *Container,
- const ObjCProtocolDecl *Proto,
- const ObjCCommonTypesHelper &ObjCTypes,
- bool IsClassProperty);
-
/// GetProtocolRef - Return a reference to the internal protocol
/// description, creating an empty one if it has not been
/// defined. The return value has type ProtocolPtrTy.
@@ -1009,15 +1018,25 @@ public:
///
/// \param Name - The variable name.
/// \param Init - The variable initializer; this is also used to
- /// define the type of the variable.
+ /// define the type of the variable.
/// \param Section - The section the variable should go into, or empty.
/// \param Align - The alignment for the variable, or 0.
/// \param AddToUsed - Whether the variable should be added to
- /// "llvm.used".
- llvm::GlobalVariable *CreateMetadataVar(Twine Name, llvm::Constant *Init,
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name,
+ ConstantStructBuilder &Init,
+ StringRef Section, CharUnits Align,
+ bool AddToUsed);
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name,
+ llvm::Constant *Init,
StringRef Section, CharUnits Align,
bool AddToUsed);
+ llvm::GlobalVariable *CreateCStringLiteral(StringRef Name,
+ ObjCLabelType LabelType,
+ bool ForceNonFragileABI = false,
+ bool NullTerminate = true);
+
protected:
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
ReturnValueSlot Return,
@@ -1044,6 +1063,7 @@ public:
}
ConstantAddress GenerateConstantString(const StringLiteral *SL) override;
+ ConstantAddress GenerateConstantNSString(const StringLiteral *SL);
llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=nullptr) override;
@@ -1060,6 +1080,9 @@ public:
/// forward references will be filled in with empty bodies if no
/// definition is seen. The return value has type ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+
+ virtual llvm::Constant *getNSConstantStringClassRef() = 0;
+
llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CGBlockInfo &blockInfo) override;
llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
@@ -1069,8 +1092,95 @@ public:
QualType T) override;
};
+namespace {
+
+enum class MethodListType {
+ CategoryInstanceMethods,
+ CategoryClassMethods,
+ InstanceMethods,
+ ClassMethods,
+ ProtocolInstanceMethods,
+ ProtocolClassMethods,
+ OptionalProtocolInstanceMethods,
+ OptionalProtocolClassMethods,
+};
+
+/// A convenience class for splitting the methods of a protocol into
+/// the four interesting groups.
+class ProtocolMethodLists {
+public:
+ enum Kind {
+ RequiredInstanceMethods,
+ RequiredClassMethods,
+ OptionalInstanceMethods,
+ OptionalClassMethods
+ };
+ enum {
+ NumProtocolMethodLists = 4
+ };
+
+ static MethodListType getMethodListKind(Kind kind) {
+ switch (kind) {
+ case RequiredInstanceMethods:
+ return MethodListType::ProtocolInstanceMethods;
+ case RequiredClassMethods:
+ return MethodListType::ProtocolClassMethods;
+ case OptionalInstanceMethods:
+ return MethodListType::OptionalProtocolInstanceMethods;
+ case OptionalClassMethods:
+ return MethodListType::OptionalProtocolClassMethods;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ SmallVector<const ObjCMethodDecl *, 4> Methods[NumProtocolMethodLists];
+
+ static ProtocolMethodLists get(const ObjCProtocolDecl *PD) {
+ ProtocolMethodLists result;
+
+ for (auto MD : PD->methods()) {
+ size_t index = (2 * size_t(MD->isOptional()))
+ + (size_t(MD->isClassMethod()));
+ result.Methods[index].push_back(MD);
+ }
+
+ return result;
+ }
+
+ template <class Self>
+ SmallVector<llvm::Constant*, 8> emitExtendedTypesArray(Self *self) const {
+ // In both ABIs, the method types list is parallel with the
+ // concatenation of the methods arrays in the following order:
+ // instance methods
+ // class methods
+ // optional instance methods
+ // optional class methods
+ SmallVector<llvm::Constant*, 8> result;
+
+ // Methods is already in the correct order for both ABIs.
+ for (auto &list : Methods) {
+ for (auto MD : list) {
+ result.push_back(self->GetMethodVarType(MD, true));
+ }
+ }
+
+ return result;
+ }
+
+ template <class Self>
+ llvm::Constant *emitMethodList(Self *self, const ObjCProtocolDecl *PD,
+ Kind kind) const {
+ return self->emitMethodList(PD->getObjCRuntimeNameAsString(),
+ getMethodListKind(kind), Methods[kind]);
+ }
+};
+
+} // end anonymous namespace
+
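The get() helper above leans on the Kind enumerators matching the arithmetic
2 * isOptional + isClassMethod. A minimal standalone sketch of that bucketing,
with a stand-in Method type rather than clang's ObjCMethodDecl:

#include <array>
#include <cstdio>
#include <vector>

// Stand-in for ObjCMethodDecl; only the two predicates matter here.
struct Method { bool optional; bool classMethod; const char *name; };

int main() {
  // Index = 2*isOptional + isClassMethod, matching the Kind order above:
  // 0 required-instance, 1 required-class,
  // 2 optional-instance, 3 optional-class.
  std::array<std::vector<const Method *>, 4> buckets;
  const Method methods[] = {{false, false, "reqInst"}, {false, true, "reqCls"},
                            {true, false, "optInst"}, {true, true, "optCls"}};
  for (const Method &M : methods)
    buckets[2u * M.optional + M.classMethod].push_back(&M);
  for (unsigned i = 0; i != 4; ++i)
    for (const Method *M : buckets[i])
      std::printf("bucket %u: %s\n", i, M->name);
  return 0;
}
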
class CGObjCMac : public CGObjCCommonMac {
private:
+ friend ProtocolMethodLists;
+
ObjCTypesHelper ObjCTypes;
/// EmitModuleInfo - Another marker encoding module level
@@ -1091,7 +1201,7 @@ private:
llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID,
CharUnits instanceSize,
bool hasMRCWeakIvars,
- bool isClassProperty);
+ bool isMetaclass);
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class.
@@ -1123,30 +1233,18 @@ private:
/// given implementation. The return value has type ClassPtrTy.
llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- ArrayRef<llvm::Constant*> Methods);
+ ArrayRef<const ObjCMethodDecl *> Methods);
- llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+ void emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD);
- llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+ void emitMethodDescriptionConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD);
/// EmitMethodList - Emit the method list for the given
/// implementation. The return value has type MethodListPtrTy.
- llvm::Constant *EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods);
-
- /// EmitMethodDescList - Emit a method description list for a list of
- /// method declarations.
- /// - TypeName: The name for the type containing the methods.
- /// - IsProtocol: True iff these methods are for a protocol.
- /// - ClassMethds: True iff these are class methods.
- /// - Required: When true, only "required" methods are
- /// listed. Similarly, when false only "optional" methods are
- /// listed. For classes this should always be true.
- /// - begin, end: The method list to output.
- ///
- /// The return value has type MethodDescriptionListPtrTy.
- llvm::Constant *EmitMethodDescList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods);
+ llvm::Constant *emitMethodList(Twine Name, MethodListType MLT,
+ ArrayRef<const ObjCMethodDecl *> Methods);
/// GetOrEmitProtocol - Get the protocol object for the given
/// declaration, emitting it if necessary. The return value has type
@@ -1165,9 +1263,7 @@ private:
/// ProtocolExtensionPtrTy.
llvm::Constant *
EmitProtocolExtension(const ObjCProtocolDecl *PD,
- ArrayRef<llvm::Constant*> OptInstanceMethods,
- ArrayRef<llvm::Constant*> OptClassMethods,
- ArrayRef<llvm::Constant*> MethodTypesExt);
+ const ProtocolMethodLists &methodLists);
/// EmitProtocolList - Generate the list of referenced
/// protocols. The return value has type ProtocolListPtrTy.
@@ -1183,6 +1279,8 @@ private:
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
+ llvm::Constant *getNSConstantStringClassRef() override;
+
llvm::Function *ModuleInitFunction() override;
CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
@@ -1262,20 +1360,14 @@ public:
llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) override;
-
- /// GetClassGlobal - Return the global variable for the Objective-C
- /// class of the given name.
- llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) override {
- llvm_unreachable("CGObjCMac::GetClassGlobal");
- }
};
class CGObjCNonFragileABIMac : public CGObjCCommonMac {
private:
+ friend ProtocolMethodLists;
ObjCNonFragileABITypesHelper ObjCTypes;
llvm::GlobalVariable* ObjCEmptyCacheVar;
- llvm::GlobalVariable* ObjCEmptyVtableVar;
+ llvm::Constant* ObjCEmptyVtableVar;
/// SuperClassReferences - uniqued super class references.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
@@ -1310,21 +1402,22 @@ private:
unsigned InstanceStart,
unsigned InstanceSize,
const ObjCImplementationDecl *ID);
- llvm::GlobalVariable * BuildClassMetaData(const std::string &ClassName,
- llvm::Constant *IsAGV,
- llvm::Constant *SuperClassGV,
- llvm::Constant *ClassRoGV,
- bool HiddenVisibility,
- bool Weak);
-
- llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+ llvm::GlobalVariable *BuildClassObject(const ObjCInterfaceDecl *CI,
+ bool isMetaclass,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ void emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD,
+ bool forProtocol);
+
+ /// Emit the method list for the given implementation. The return value
+ /// has type MethodListnfABITy.
+ llvm::Constant *emitMethodList(Twine Name, MethodListType MLT,
+ ArrayRef<const ObjCMethodDecl *> Methods);
- llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
-
- /// EmitMethodList - Emit the method list for the given
- /// implementation. The return value has type MethodListnfABITy.
- llvm::Constant *EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods);
/// EmitIvarList - Emit the ivar list for the given
/// implementation. If ForClass is true the list of class ivars
/// (i.e. metaclass ivars) is emitted, otherwise the list of
@@ -1365,8 +1458,12 @@ private:
/// GetClassGlobal - Return the global variable for the Objective-C
/// class of the given name.
- llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) override;
+ llvm::Constant *GetClassGlobal(StringRef Name,
+ ForDefinition_t IsForDefinition,
+ bool Weak = false, bool DLLImport = false);
+ llvm::Constant *GetClassGlobal(const ObjCInterfaceDecl *ID,
+ bool isMetaclass,
+ ForDefinition_t isForDefinition);
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class reference.
@@ -1374,7 +1471,7 @@ private:
const ObjCInterfaceDecl *ID);
llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF,
- IdentifierInfo *II, bool Weak,
+ IdentifierInfo *II,
const ObjCInterfaceDecl *ID);
llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
@@ -1404,7 +1501,7 @@ private:
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
- bool ForDefinition);
+ ForDefinition_t IsForDefinition);
StringRef getMetaclassSymbolPrefix() const { return "OBJC_METACLASS_$_"; }
@@ -1451,7 +1548,9 @@ private:
public:
CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
- // FIXME. All stubs for now!
+
+ llvm::Constant *getNSConstantStringClassRef() override;
+
llvm::Function *ModuleInitFunction() override;
CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
@@ -1761,11 +1860,115 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
};
*/
-ConstantAddress CGObjCCommonMac::GenerateConstantString(
- const StringLiteral *SL) {
- return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
- CGM.GetAddrOfConstantCFString(SL) :
- CGM.GetAddrOfConstantString(SL));
+ConstantAddress
+CGObjCCommonMac::GenerateConstantString(const StringLiteral *SL) {
+ return (!CGM.getLangOpts().NoConstantCFStrings
+ ? CGM.GetAddrOfConstantCFString(SL)
+ : GenerateConstantNSString(SL));
+}
+
+static llvm::StringMapEntry<llvm::GlobalVariable *> &
+GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
+ const StringLiteral *Literal, unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ StringLength = String.size();
+ return *Map.insert(std::make_pair(String, nullptr)).first;
+}
+
+llvm::Constant *CGObjCMac::getNSConstantStringClassRef() {
+ if (llvm::Value *V = ConstantStringClassRef)
+ return cast<llvm::Constant>(V);
+
+ auto &StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+ std::string str =
+ StringClass.empty() ? "_NSConstantStringClassReference"
+ : "_" + StringClass + "ClassReference";
+
+ llvm::Type *PTy = llvm::ArrayType::get(CGM.IntTy, 0);
+ auto GV = CGM.CreateRuntimeVariable(PTy, str);
+ auto V = llvm::ConstantExpr::getBitCast(GV, CGM.IntTy->getPointerTo());
+ ConstantStringClassRef = V;
+ return V;
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::getNSConstantStringClassRef() {
+ if (llvm::Value *V = ConstantStringClassRef)
+ return cast<llvm::Constant>(V);
+
+ auto &StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+ std::string str =
+ StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
+ : "OBJC_CLASS_$_" + StringClass;
+ auto GV = GetClassGlobal(str, NotForDefinition);
+
+ // Make sure the result is of the correct type.
+ auto V = llvm::ConstantExpr::getBitCast(GV, CGM.IntTy->getPointerTo());
+
+ ConstantStringClassRef = V;
+ return V;
+}
+
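Both overrides derive the runtime symbol from -fconstant-string-class, falling
back to NSConstantString. A hedged sketch of just the naming rule, with the
prefixes and fallbacks copied from the two functions above:

#include <string>

// Fragile ABI: _<Class>ClassReference. Non-fragile ABI: OBJC_CLASS_$_<Class>.
// The default class name, NSConstantString, matches the fallbacks above.
std::string stringClassSymbol(const std::string &cls, bool nonFragile) {
  const std::string name = cls.empty() ? "NSConstantString" : cls;
  return nonFragile ? "OBJC_CLASS_$_" + name : "_" + name + "ClassReference";
}
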
+ConstantAddress
+CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+ GetConstantStringEntry(NSConstantStringMap, Literal, StringLength);
+
+ if (auto *C = Entry.second)
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+
+ // If we don't already have it, get _NSConstantStringClassReference.
+ llvm::Constant *Class = getNSConstantStringClassRef();
+
+ // If we don't already have it, construct the type for a constant NSString.
+ if (!NSConstantStringType) {
+ NSConstantStringType =
+ llvm::StructType::create({
+ CGM.Int32Ty->getPointerTo(),
+ CGM.Int8PtrTy,
+ CGM.IntTy
+ }, "struct.__builtin_NSString");
+ }
+
+ ConstantInitBuilder Builder(CGM);
+ auto Fields = Builder.beginStruct(NSConstantStringType);
+
+ // Class pointer.
+ Fields.add(Class);
+
+ // String pointer.
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(VMContext, Entry.first());
+
+ llvm::GlobalValue::LinkageTypes Linkage = llvm::GlobalValue::PrivateLinkage;
+ bool isConstant = !CGM.getLangOpts().WritableStrings;
+
+ auto *GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(), isConstant,
+ Linkage, C, ".str");
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ // Don't enforce the target's minimum global alignment, since the only use
+ // of the string is via this class initializer.
+ GV->setAlignment(1);
+ Fields.addBitCast(GV, CGM.Int8PtrTy);
+
+ // String length.
+ Fields.addInt(CGM.IntTy, StringLength);
+
+ // The struct.
+ CharUnits Alignment = CGM.getPointerAlign();
+ GV = Fields.finishAndCreateGlobal("_unnamed_nsstring_", Alignment,
+ /*constant*/ true,
+ llvm::GlobalVariable::PrivateLinkage);
+ const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
+ const char *NSStringNonFragileABISection =
+ "__DATA,__objc_stringobj,regular,no_dead_strip";
+ // FIXME. Fix section.
+ GV->setSection(CGM.getLangOpts().ObjCRuntime.isNonFragile()
+ ? NSStringNonFragileABISection
+ : NSStringSection);
+ Entry.second = GV;
+
+ return ConstantAddress(GV, Alignment);
}
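The initializer assembled above has the shape of a three-field record. A
hypothetical C++ mirror of struct.__builtin_NSString, for orientation only;
the runtime does not export this type:

// Hypothetical mirror of the emitted struct.__builtin_NSString layout.
struct BuiltinNSString {
  const int *isa;    // class reference from getNSConstantStringClassRef()
  const char *str;   // the 1-byte-aligned private ".str" global
  int length;        // code-unit count of the literal
};
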
enum {
@@ -1953,8 +2156,9 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
llvm::Instruction *CallSite;
Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
- RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs,
- CGCalleeInfo(), &CallSite);
+ CGCallee Callee = CGCallee::forDirect(Fn);
+ RValue rvalue = CGF.EmitCall(MSI.CallInfo, Callee, Return, ActualArgs,
+ &CallSite);
// Mark the call as noreturn if the method is marked noreturn and the
// receiver cannot be null.
@@ -2576,10 +2780,9 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
}
}
- llvm::GlobalVariable *Entry = CreateMetadataVar(
- "OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, BitMap, false),
- "__TEXT,__objc_classname,cstring_literals", CharUnits::One(), true);
+ auto *Entry = CreateCStringLiteral(BitMap, ObjCLabelType::ClassName,
+ /*ForceNonFragileABI=*/true,
+ /*NullTerminate=*/false);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -2730,66 +2933,29 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
// Construct method lists.
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
- std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
- std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
- for (const auto *MD : PD->instance_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptInstanceMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- InstanceMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
-
- for (const auto *MD : PD->class_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptClassMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- ClassMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
-
- MethodTypesExt.insert(MethodTypesExt.end(),
- OptMethodTypesExt.begin(), OptMethodTypesExt.end());
-
- llvm::Constant *Values[] = {
- EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
- MethodTypesExt),
- GetClassName(PD->getObjCRuntimeNameAsString()),
- EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(),
- PD->protocol_begin(), PD->protocol_end()),
- EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods),
- EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods)};
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
- Values);
+ auto methodLists = ProtocolMethodLists::get(PD);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolTy);
+ values.add(EmitProtocolExtension(PD, methodLists));
+ values.add(GetClassName(PD->getObjCRuntimeNameAsString()));
+ values.add(EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(), PD->protocol_end()));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredInstanceMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredClassMethods));
if (Entry) {
// Already created, update the initializer.
assert(Entry->hasPrivateLinkage());
- Entry->setInitializer(Init);
+ values.finishAndSetAsInitializer(Entry);
} else {
- Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
- false, llvm::GlobalValue::PrivateLinkage,
- Init, "OBJC_PROTOCOL_" + PD->getName());
+ Entry = values.finishAndCreateGlobal("OBJC_PROTOCOL_" + PD->getName(),
+ CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
- // FIXME: Is this necessary? Why only for protocol?
- Entry->setAlignment(4);
Protocols[PD->getIdentifier()] = Entry;
}
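GetOrEmitProtocol is the template for most rewrites in this patch: populate
fields in declaration order with ConstantInitBuilder, then either patch a
forward-declared global or create a fresh one. A toy model of that two-exit
shape, not the real builder API:

#include <memory>
#include <string>
#include <vector>

// Toy global and builder: ints stand in for LLVM constants.
struct Global { std::string name; std::vector<int> init; };

struct StructBuilder {
  std::vector<int> fields;
  void add(int v) { fields.push_back(v); }
  // Forward reference already exists: replace its initializer in place.
  void finishAndSetAsInitializer(Global &g) { g.init = std::move(fields); }
  // No forward reference: materialize a fresh global.
  std::unique_ptr<Global> finishAndCreateGlobal(std::string name) {
    return std::unique_ptr<Global>(
        new Global{std::move(name), std::move(fields)});
  }
};
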
@@ -2828,37 +2994,49 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
*/
llvm::Constant *
CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
- ArrayRef<llvm::Constant*> OptInstanceMethods,
- ArrayRef<llvm::Constant*> OptClassMethods,
- ArrayRef<llvm::Constant*> MethodTypesExt) {
- uint64_t Size =
- CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
- llvm::Constant *Values[] = {
- llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
- EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_OPT_" + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- OptInstanceMethods),
- EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- OptClassMethods),
- EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD,
- ObjCTypes, false),
- EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
- MethodTypesExt, ObjCTypes),
- EmitPropertyList("OBJC_$_CLASS_PROP_PROTO_LIST_" + PD->getName(), nullptr,
- PD, ObjCTypes, true)};
+ const ProtocolMethodLists &methodLists) {
+ auto optInstanceMethods =
+ methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalInstanceMethods);
+ auto optClassMethods =
+ methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalClassMethods);
+
+ auto extendedMethodTypes =
+ EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ methodLists.emitExtendedTypesArray(this),
+ ObjCTypes);
+
+ auto instanceProperties =
+ EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD,
+ ObjCTypes, false);
+ auto classProperties =
+ EmitPropertyList("OBJC_$_CLASS_PROP_PROTO_LIST_" + PD->getName(), nullptr,
+ PD, ObjCTypes, true);
// Return null if no extension bits are used.
- if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
- Values[3]->isNullValue() && Values[4]->isNullValue() &&
- Values[5]->isNullValue())
+ if (optInstanceMethods->isNullValue() &&
+ optClassMethods->isNullValue() &&
+ extendedMethodTypes->isNullValue() &&
+ instanceProperties->isNullValue() &&
+ classProperties->isNullValue()) {
return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ }
- llvm::Constant *Init =
- llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+ uint64_t size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolExtensionTy);
+ values.addInt(ObjCTypes.IntTy, size);
+ values.add(optInstanceMethods);
+ values.add(optClassMethods);
+ values.add(instanceProperties);
+ values.add(extendedMethodTypes);
+ values.add(classProperties);
// No special section, but goes in llvm.used
- return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), Init,
+ return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), values,
StringRef(), CGM.getPointerAlign(), true);
}
@@ -2870,59 +3048,57 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
};
*/
llvm::Constant *
-CGObjCMac::EmitProtocolList(Twine Name,
+CGObjCMac::EmitProtocolList(Twine name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
- SmallVector<llvm::Constant *, 16> ProtocolRefs;
-
- for (; begin != end; ++begin)
- ProtocolRefs.push_back(GetProtocolRef(*begin));
-
// Just return null for empty protocol lists
- if (ProtocolRefs.empty())
+ if (begin == end)
return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
- // This list is null terminated.
- ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
- llvm::Constant *Values[3];
// This field is only used by the runtime.
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
- ProtocolRefs.size() - 1);
- Values[2] =
- llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
- ProtocolRefs.size()),
- ProtocolRefs);
-
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ values.addNullPointer(ObjCTypes.ProtocolListPtrTy);
+
+ // Reserve a slot for the count.
+ auto countSlot = values.addPlaceholder();
+
+ auto refsArray = values.beginArray(ObjCTypes.ProtocolPtrTy);
+ for (; begin != end; ++begin) {
+ refsArray.add(GetProtocolRef(*begin));
+ }
+ auto count = refsArray.size();
+
+ // This list is null terminated.
+ refsArray.addNullPointer(ObjCTypes.ProtocolPtrTy);
+
+ refsArray.finishAndAddTo(values);
+ values.fillPlaceholderWithInt(countSlot, ObjCTypes.LongTy, count);
+
+ StringRef section;
+ if (CGM.getTriple().isOSBinFormatMachO())
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+
llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- CGM.getPointerAlign(), false);
+ CreateMetadataVar(name, values, section, CGM.getPointerAlign(), false);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
}
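The placeholder exists because the count precedes an array whose length is
only known after iterating the protocol range, and the trailing null entry
must stay uncounted. The same pattern in freestanding C++, with plain
integers standing in for LLVM constants:

#include <cstddef>
#include <cstdint>
#include <vector>

// Reserve a slot for the count, fill the array, then back-patch it; the
// null terminator is emitted but deliberately left out of the count.
std::vector<std::uint64_t>
buildCountedList(const std::vector<std::uint64_t> &refs) {
  std::vector<std::uint64_t> out;
  out.push_back(0);                       // [0] runtime-only "next" field
  const std::size_t countSlot = out.size();
  out.push_back(0);                       // [1] placeholder for the count
  for (std::uint64_t r : refs)
    out.push_back(r);                     // the protocol references
  out.push_back(0);                       // null terminator, not counted
  out[countSlot] = refs.size();           // fillPlaceholderWithInt analogue
  return out;
}
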
-void CGObjCCommonMac::
+static void
PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
- SmallVectorImpl<llvm::Constant *> &Properties,
- const Decl *Container,
+ SmallVectorImpl<const ObjCPropertyDecl *> &Properties,
const ObjCProtocolDecl *Proto,
- const ObjCCommonTypesHelper &ObjCTypes,
bool IsClassProperty) {
for (const auto *P : Proto->protocols())
- PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes,
- IsClassProperty);
+ PushProtocolProperties(PropertySet, Properties, P, IsClassProperty);
for (const auto *PD : Proto->properties()) {
if (IsClassProperty != PD->isClassProperty())
continue;
if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
- llvm::Constant *Prop[] = {
- GetPropertyName(PD->getIdentifier()),
- GetPropertyTypeString(PD, Container)
- };
- Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ Properties.push_back(PD);
}
}
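Turning PushProtocolProperties into a free function leaves a simple recursive
walk: inherited protocols are visited first, and the insert().second check
keeps a redeclared property from being listed twice. A self-contained sketch
with std containers standing in for the LLVM ones:

#include <set>
#include <string>
#include <vector>

struct Protocol {
  std::vector<const Protocol *> parents;
  std::vector<std::string> properties;
};

// Inherited protocols contribute first; insert().second filters anything a
// parent (or an earlier sibling) already recorded.
void collect(const Protocol *P, std::set<std::string> &seen,
             std::vector<std::string> &out) {
  for (const Protocol *Parent : P->parents)
    collect(Parent, seen, out);
  for (const std::string &Prop : P->properties)
    if (seen.insert(Prop).second)
      out.push_back(Prop);
}
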
@@ -2952,21 +3128,16 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
}
- SmallVector<llvm::Constant *, 16> Properties;
+ SmallVector<const ObjCPropertyDecl *, 16> Properties;
llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
- auto AddProperty = [&](const ObjCPropertyDecl *PD) {
- llvm::Constant *Prop[] = {GetPropertyName(PD->getIdentifier()),
- GetPropertyTypeString(PD, Container)};
- Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
- };
if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
for (const ObjCCategoryDecl *ClassExt : OID->known_extensions())
for (auto *PD : ClassExt->properties()) {
if (IsClassProperty != PD->isClassProperty())
continue;
PropertySet.insert(PD->getIdentifier());
- AddProperty(PD);
+ Properties.push_back(PD);
}
for (const auto *PD : OCD->properties()) {
@@ -2976,40 +3147,45 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
// class extension.
if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
- AddProperty(PD);
+ Properties.push_back(PD);
}
if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
for (const auto *P : OID->all_referenced_protocols())
- PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes,
- IsClassProperty);
+ PushProtocolProperties(PropertySet, Properties, P, IsClassProperty);
}
else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
for (const auto *P : CD->protocols())
- PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes,
- IsClassProperty);
+ PushProtocolProperties(PropertySet, Properties, P, IsClassProperty);
}
// Return null for empty list.
if (Properties.empty())
return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- unsigned PropertySize =
+ unsigned propertySize =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.PropertyTy);
- llvm::Constant *Values[3];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
- Properties.size());
- Values[2] = llvm::ConstantArray::get(AT, Properties);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addInt(ObjCTypes.IntTy, propertySize);
+ values.addInt(ObjCTypes.IntTy, Properties.size());
+ auto propertiesArray = values.beginArray(ObjCTypes.PropertyTy);
+ for (auto PD : Properties) {
+ auto property = propertiesArray.beginStruct(ObjCTypes.PropertyTy);
+ property.add(GetPropertyName(PD->getIdentifier()));
+ property.add(GetPropertyTypeString(PD, Container));
+ property.finishAndAddTo(propertiesArray);
+ }
+ propertiesArray.finishAndAddTo(values);
+
+ StringRef Section;
+ if (CGM.getTriple().isOSBinFormatMachO())
+ Section = (ObjCABI == 2) ? "__DATA, __objc_const"
+ : "__OBJC,__property,regular,no_dead_strip";
llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init,
- (ObjCABI == 2) ? "__DATA, __objc_const" :
- "__OBJC,__property,regular,no_dead_strip",
- CGM.getPointerAlign(),
- true);
+ CreateMetadataVar(Name, values, Section, CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
}
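Laid out flat, the metadata built above is the usual entsize/count/array
triple. A hypothetical C++ mirror, with illustrative field names:

// Hypothetical mirror of the emitted property-list metadata.
struct PropertyEntry { const char *name; const char *attributes; };
struct PropertyList {
  unsigned entsize;        // alloc size of one PropertyEntry, per data layout
  unsigned count;          // number of entries that follow
  PropertyEntry list[1];   // 'count' entries emitted inline
};
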
@@ -3025,50 +3201,13 @@ CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
MethodTypes.size());
llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes);
- llvm::GlobalVariable *GV = CreateMetadataVar(
- Name, Init, (ObjCABI == 2) ? "__DATA, __objc_const" : StringRef(),
- CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
-}
-
-/*
- struct objc_method_description_list {
- int count;
- struct objc_method_description list[];
- };
-*/
-llvm::Constant *
-CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- llvm::Constant *Desc[] = {
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy),
- GetMethodVarType(MD)
- };
- if (!Desc[1])
- return nullptr;
-
- return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
- Desc);
-}
-
-llvm::Constant *
-CGObjCMac::EmitMethodDescList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods) {
- // Return null for empty list.
- if (Methods.empty())
- return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
-
- llvm::Constant *Values[2];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
- Methods.size());
- Values[1] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ StringRef Section;
+ if (CGM.getTriple().isOSBinFormatMachO() && ObjCABI == 2)
+ Section = "__DATA, __objc_const";
llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.MethodDescriptionListPtrTy);
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
}
/*
@@ -3098,54 +3237,53 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
<< OCD->getName();
- SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
- for (const auto *I : OCD->instance_methods())
- // Instance methods should always be defined.
- InstanceMethods.push_back(GetMethodConstant(I));
+ ConstantInitBuilder Builder(CGM);
+ auto Values = Builder.beginStruct(ObjCTypes.CategoryTy);
- for (const auto *I : OCD->class_methods())
- // Class methods should always be defined.
- ClassMethods.push_back(GetMethodConstant(I));
+ enum {
+ InstanceMethods,
+ ClassMethods,
+ NumMethodLists
+ };
+ SmallVector<const ObjCMethodDecl *, 16> Methods[NumMethodLists];
+ for (const auto *MD : OCD->methods()) {
+ Methods[unsigned(MD->isClassMethod())].push_back(MD);
+ }
- llvm::Constant *Values[8];
- Values[0] = GetClassName(OCD->getName());
- Values[1] = GetClassName(Interface->getObjCRuntimeNameAsString());
+ Values.add(GetClassName(OCD->getName()));
+ Values.add(GetClassName(Interface->getObjCRuntimeNameAsString()));
LazySymbols.insert(Interface->getIdentifier());
- Values[2] = EmitMethodList("OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods);
- Values[3] = EmitMethodList("OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods);
+
+ Values.add(emitMethodList(ExtName, MethodListType::CategoryInstanceMethods,
+ Methods[InstanceMethods]));
+ Values.add(emitMethodList(ExtName, MethodListType::CategoryClassMethods,
+ Methods[ClassMethods]));
if (Category) {
- Values[4] =
+ Values.add(
EmitProtocolList("OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
- Category->protocol_begin(), Category->protocol_end());
+ Category->protocol_begin(), Category->protocol_end()));
} else {
- Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values.addNullPointer(ObjCTypes.ProtocolListPtrTy);
}
- Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values.addInt(ObjCTypes.IntTy, Size);
// If there is no category @interface then there can be no properties.
if (Category) {
- Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, false);
- Values[7] = EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, true);
+ Values.add(EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, false));
+ Values.add(EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, true));
} else {
- Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- Values[7] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ Values.addNullPointer(ObjCTypes.PropertyListPtrTy);
+ Values.addNullPointer(ObjCTypes.PropertyListPtrTy);
}
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
- Values);
-
llvm::GlobalVariable *GV =
- CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init,
+ CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Values,
"__OBJC,__category,regular,no_dead_strip",
CGM.getPointerAlign(), true);
DefinedCategories.push_back(GV);
- DefinedCategoryNames.insert(ExtName.str());
+ DefinedCategoryNames.insert(llvm::CachedHashString(ExtName));
 // Method definition entries must be cleared for the next implementation.
MethodDefinitions.clear();
}
@@ -3282,57 +3420,56 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= FragileABI_Class_Hidden;
- SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
- for (const auto *I : ID->instance_methods())
- // Instance methods should always be defined.
- InstanceMethods.push_back(GetMethodConstant(I));
-
- for (const auto *I : ID->class_methods())
- // Class methods should always be defined.
- ClassMethods.push_back(GetMethodConstant(I));
+ enum {
+ InstanceMethods,
+ ClassMethods,
+ NumMethodLists
+ };
+ SmallVector<const ObjCMethodDecl *, 16> Methods[NumMethodLists];
+ for (const auto *MD : ID->methods()) {
+ Methods[unsigned(MD->isClassMethod())].push_back(MD);
+ }
for (const auto *PID : ID->property_impls()) {
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
ObjCPropertyDecl *PD = PID->getPropertyDecl();
if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- InstanceMethods.push_back(C);
+ if (GetMethodDefinition(MD))
+ Methods[InstanceMethods].push_back(MD);
if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- InstanceMethods.push_back(C);
+ if (GetMethodDefinition(MD))
+ Methods[InstanceMethods].push_back(MD);
}
}
- llvm::Constant *Values[12];
- Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassTy);
+ values.add(EmitMetaClass(ID, Protocols, Methods[ClassMethods]));
if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
// Record a reference to the super class.
LazySymbols.insert(Super->getIdentifier());
- Values[ 1] =
- llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.addBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
} else {
- Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ values.addNullPointer(ObjCTypes.ClassPtrTy);
}
- Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString());
+ values.add(GetClassName(ID->getObjCRuntimeNameAsString()));
// Version is always 0.
- Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
- Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
- Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size.getQuantity());
- Values[ 6] = EmitIvarList(ID, false);
- Values[7] = EmitMethodList("OBJC_INSTANCE_METHODS_" + ID->getName(),
- "__OBJC,__inst_meth,regular,no_dead_strip",
- InstanceMethods);
+ values.addInt(ObjCTypes.LongTy, 0);
+ values.addInt(ObjCTypes.LongTy, Flags);
+ values.addInt(ObjCTypes.LongTy, Size.getQuantity());
+ values.add(EmitIvarList(ID, false));
+ values.add(emitMethodList(ID->getName(), MethodListType::InstanceMethods,
+ Methods[InstanceMethods]));
// cache is always NULL.
- Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
- Values[ 9] = Protocols;
- Values[10] = BuildStrongIvarLayout(ID, CharUnits::Zero(), Size);
- Values[11] = EmitClassExtension(ID, Size, hasMRCWeak,
- false/*isClassProperty*/);
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
- Values);
+ values.addNullPointer(ObjCTypes.CachePtrTy);
+ values.add(Protocols);
+ values.add(BuildStrongIvarLayout(ID, CharUnits::Zero(), Size));
+ values.add(EmitClassExtension(ID, Size, hasMRCWeak,
+ /*isMetaclass*/ false));
+
std::string Name("OBJC_CLASS_");
Name += ClassName;
const char *Section = "__OBJC,__class,regular,no_dead_strip";
@@ -3341,12 +3478,12 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (GV) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
- GV->setInitializer(Init);
+ values.finishAndSetAsInitializer(GV);
GV->setSection(Section);
GV->setAlignment(CGM.getPointerAlign().getQuantity());
CGM.addCompilerUsedGlobal(GV);
} else
- GV = CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ GV = CreateMetadataVar(Name, values, Section, CGM.getPointerAlign(), true);
DefinedClasses.push_back(GV);
ImplementedClasses.push_back(Interface);
 // Method definition entries must be cleared for the next implementation.
@@ -3355,50 +3492,46 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- ArrayRef<llvm::Constant*> Methods) {
+ ArrayRef<const ObjCMethodDecl*> Methods) {
unsigned Flags = FragileABI_Class_Meta;
unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassTy);
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= FragileABI_Class_Hidden;
- llvm::Constant *Values[12];
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassTy);
// The isa for the metaclass is the root of the hierarchy.
const ObjCInterfaceDecl *Root = ID->getClassInterface();
while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
Root = Super;
- Values[ 0] =
- llvm::ConstantExpr::getBitCast(GetClassName(Root->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.addBitCast(GetClassName(Root->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
// The super class for the metaclass is emitted as the name of the
// super class. The runtime fixes this up to point to the
// *metaclass* for the super class.
if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
- Values[ 1] =
- llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.addBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
} else {
- Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ values.addNullPointer(ObjCTypes.ClassPtrTy);
}
- Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString());
+ values.add(GetClassName(ID->getObjCRuntimeNameAsString()));
// Version is always 0.
- Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
- Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
- Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
- Values[ 6] = EmitIvarList(ID, true);
- Values[7] =
- EmitMethodList("OBJC_CLASS_METHODS_" + ID->getNameAsString(),
- "__OBJC,__cls_meth,regular,no_dead_strip", Methods);
+ values.addInt(ObjCTypes.LongTy, 0);
+ values.addInt(ObjCTypes.LongTy, Flags);
+ values.addInt(ObjCTypes.LongTy, Size);
+ values.add(EmitIvarList(ID, true));
+ values.add(emitMethodList(ID->getName(), MethodListType::ClassMethods,
+ Methods));
// cache is always NULL.
- Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
- Values[ 9] = Protocols;
+ values.addNullPointer(ObjCTypes.CachePtrTy);
+ values.add(Protocols);
// ivar_layout for metaclass is always NULL.
- Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ values.addNullPointer(ObjCTypes.Int8PtrTy);
// The class extension is used to store class properties for metaclasses.
- Values[11] = EmitClassExtension(ID, CharUnits::Zero(), false/*hasMRCWeak*/,
- true/*isClassProperty*/);
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
- Values);
+ values.add(EmitClassExtension(ID, CharUnits::Zero(), false/*hasMRCWeak*/,
+ /*isMetaclass*/true));
std::string Name("OBJC_METACLASS_");
Name += ID->getName();
@@ -3408,14 +3541,13 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
if (GV) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
- GV->setInitializer(Init);
+ values.finishAndSetAsInitializer(GV);
} else {
- GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
- llvm::GlobalValue::PrivateLinkage,
- Init, Name);
+ GV = values.finishAndCreateGlobal(Name, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
}
GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
- GV->setAlignment(4);
CGM.addCompilerUsedGlobal(GV);
return GV;
@@ -3471,32 +3603,38 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
llvm::Constant *
CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID,
CharUnits InstanceSize, bool hasMRCWeakIvars,
- bool isClassProperty) {
- uint64_t Size =
- CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+ bool isMetaclass) {
+ // Weak ivar layout.
+ llvm::Constant *layout;
+ if (isMetaclass) {
+ layout = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+ } else {
+ layout = BuildWeakIvarLayout(ID, CharUnits::Zero(), InstanceSize,
+ hasMRCWeakIvars);
+ }
- llvm::Constant *Values[3];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- if (isClassProperty) {
- llvm::Type *PtrTy = CGM.Int8PtrTy;
- Values[1] = llvm::Constant::getNullValue(PtrTy);
- } else
- Values[1] = BuildWeakIvarLayout(ID, CharUnits::Zero(), InstanceSize,
- hasMRCWeakIvars);
- if (isClassProperty)
- Values[2] = EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ID->getName(),
- ID, ID->getClassInterface(), ObjCTypes, true);
- else
- Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
- ID, ID->getClassInterface(), ObjCTypes, false);
+ // Properties.
+ llvm::Constant *propertyList =
+ EmitPropertyList((isMetaclass ? Twine("\01l_OBJC_$_CLASS_PROP_LIST_")
+ : Twine("\01l_OBJC_$_PROP_LIST_"))
+ + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes, isMetaclass);
// Return null if no extension bits are used.
- if ((!Values[1] || Values[1]->isNullValue()) && Values[2]->isNullValue())
+ if (layout->isNullValue() && propertyList->isNullValue()) {
return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ }
- llvm::Constant *Init =
- llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
- return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init,
+ uint64_t size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassExtensionTy);
+ values.addInt(ObjCTypes.IntTy, size);
+ values.add(layout);
+ values.add(propertyList);
+
+ return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), values,
"__OBJC,__class_ext,regular,no_dead_strip",
CGM.getPointerAlign(), true);
}
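For reference, the record those three adds produce corresponds to this shape,
a hypothetical mirror of struct objc_class_ext (the property-list type is the
one sketched earlier, opaque here):

struct ObjCPropertyList;  // entsize/count/array shape shown earlier
struct ObjCClassExt {
  unsigned size;                          // allocation size of this record
  const unsigned char *weak_ivar_layout;  // always null for metaclasses
  const ObjCPropertyList *properties;     // class or instance properties
};
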
@@ -3515,8 +3653,6 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID,
*/
llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
bool ForClass) {
- std::vector<llvm::Constant*> Ivars;
-
// When emitting the root class GCC emits ivar entries for the
// actual class structure. It is not clear if we need to follow this
 // behavior; for now let's try to get away with not doing it. If so,
@@ -3527,91 +3663,181 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ ConstantInitBuilder builder(CGM);
+ auto ivarList = builder.beginStruct();
+ auto countSlot = ivarList.addPlaceholder();
+ auto ivars = ivarList.beginArray(ObjCTypes.IvarTy);
+
for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
- llvm::Constant *Ivar[] = {
- GetMethodVarName(IVD->getIdentifier()),
- GetMethodVarType(IVD),
- llvm::ConstantInt::get(ObjCTypes.IntTy,
- ComputeIvarBaseOffset(CGM, OID, IVD))
- };
- Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+
+ auto ivar = ivars.beginStruct(ObjCTypes.IvarTy);
+ ivar.add(GetMethodVarName(IVD->getIdentifier()));
+ ivar.add(GetMethodVarType(IVD));
+ ivar.addInt(ObjCTypes.IntTy, ComputeIvarBaseOffset(CGM, OID, IVD));
+ ivar.finishAndAddTo(ivars);
}
// Return null for empty list.
- if (Ivars.empty())
+ auto count = ivars.size();
+ if (count == 0) {
+ ivars.abandon();
+ ivarList.abandon();
return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+ }
- llvm::Constant *Values[2];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
- Ivars.size());
- Values[1] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ ivars.finishAndAddTo(ivarList);
+ ivarList.fillPlaceholderWithInt(countSlot, ObjCTypes.IntTy, count);
llvm::GlobalVariable *GV;
if (ForClass)
GV =
- CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init,
+ CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), ivarList,
"__OBJC,__class_vars,regular,no_dead_strip",
CGM.getPointerAlign(), true);
else
- GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init,
+ GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), ivarList,
"__OBJC,__instance_vars,regular,no_dead_strip",
CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
}
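The empty case has to call abandon() on both open frames because the builder
tracks every begun aggregate until it is finished or explicitly discarded. A
toy restatement of that commit-or-abandon discipline, not clang's
ConstantInitBuilder:

#include <cassert>

// Toy frame discipline: every begun aggregate must be finished or abandoned
// before the builder goes away, which is why the empty ivar list above
// abandons both the array and the outer struct before returning null.
class ToyBuilder {
  int openFrames = 0;
public:
  void begin()   { ++openFrames; }
  void finish()  { assert(openFrames > 0); --openFrames; }
  void abandon() { assert(openFrames > 0); --openFrames; }
  ~ToyBuilder() { assert(openFrames == 0 && "leaked an open frame"); }
};
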
-/*
- struct objc_method {
- SEL method_name;
- char *method_types;
- void *method;
- };
+/// Build a struct objc_method_description constant for the given method.
+///
+/// struct objc_method_description {
+/// SEL method_name;
+/// char *method_types;
+/// };
+void CGObjCMac::emitMethodDescriptionConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD) {
+ auto description = builder.beginStruct(ObjCTypes.MethodDescriptionTy);
+ description.addBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ description.add(GetMethodVarType(MD));
+ description.finishAndAddTo(builder);
+}
- struct objc_method_list {
- struct objc_method_list *obsolete;
- int count;
- struct objc_method methods_list[count];
- };
-*/
+/// Build a struct objc_method constant for the given method.
+///
+/// struct objc_method {
+/// SEL method_name;
+/// char *method_types;
+/// void *method;
+/// };
+void CGObjCMac::emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD) {
+ llvm::Function *fn = GetMethodDefinition(MD);
+ assert(fn && "no definition registered for method");
-/// GetMethodConstant - Return a struct objc_method constant for the
-/// given method if it has been defined. The result is null if the
-/// method has not been defined. The return value has type MethodPtrTy.
-llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
- llvm::Function *Fn = GetMethodDefinition(MD);
- if (!Fn)
- return nullptr;
-
- llvm::Constant *Method[] = {
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy),
- GetMethodVarType(MD),
- llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
- };
- return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+ auto method = builder.beginStruct(ObjCTypes.MethodTy);
+ method.addBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ method.add(GetMethodVarType(MD));
+ method.addBitCast(fn, ObjCTypes.Int8PtrTy);
+ method.finishAndAddTo(builder);
}
-llvm::Constant *CGObjCMac::EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods) {
+/// Build a struct objc_method_list or struct objc_method_description_list,
+/// as appropriate.
+///
+/// struct objc_method_list {
+/// struct objc_method_list *obsolete;
+/// int count;
+/// struct objc_method methods_list[count];
+/// };
+///
+/// struct objc_method_description_list {
+/// int count;
+/// struct objc_method_description list[count];
+/// };
+llvm::Constant *CGObjCMac::emitMethodList(Twine name, MethodListType MLT,
+ ArrayRef<const ObjCMethodDecl *> methods) {
+ StringRef prefix;
+ StringRef section;
+ bool forProtocol = false;
+ switch (MLT) {
+ case MethodListType::CategoryInstanceMethods:
+ prefix = "OBJC_CATEGORY_INSTANCE_METHODS_";
+ section = "__OBJC,__cat_inst_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::CategoryClassMethods:
+ prefix = "OBJC_CATEGORY_CLASS_METHODS_";
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::InstanceMethods:
+ prefix = "OBJC_INSTANCE_METHODS_";
+ section = "__OBJC,__inst_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::ClassMethods:
+ prefix = "OBJC_CLASS_METHODS_";
+ section = "__OBJC,__cls_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::ProtocolInstanceMethods:
+ prefix = "OBJC_PROTOCOL_INSTANCE_METHODS_";
+ section = "__OBJC,__cat_inst_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ case MethodListType::ProtocolClassMethods:
+ prefix = "OBJC_PROTOCOL_CLASS_METHODS_";
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolInstanceMethods:
+ prefix = "OBJC_PROTOCOL_INSTANCE_METHODS_OPT_";
+ section = "__OBJC,__cat_inst_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolClassMethods:
+ prefix = "OBJC_PROTOCOL_CLASS_METHODS_OPT_";
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ }
+
// Return null for empty list.
- if (Methods.empty())
- return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+ if (methods.empty())
+ return llvm::Constant::getNullValue(forProtocol
+ ? ObjCTypes.MethodDescriptionListPtrTy
+ : ObjCTypes.MethodListPtrTy);
+
+ // For protocols, this is an objc_method_description_list, which has
+ // a slightly different structure.
+ if (forProtocol) {
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addInt(ObjCTypes.IntTy, methods.size());
+ auto methodArray = values.beginArray(ObjCTypes.MethodDescriptionTy);
+ for (auto MD : methods) {
+ emitMethodDescriptionConstant(methodArray, MD);
+ }
+ methodArray.finishAndAddTo(values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(prefix + name, values, section,
+ CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+ }
- llvm::Constant *Values[3];
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
- Methods.size());
- Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ // Otherwise, it's an objc_method_list.
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addNullPointer(ObjCTypes.Int8PtrTy);
+ values.addInt(ObjCTypes.IntTy, methods.size());
+ auto methodArray = values.beginArray(ObjCTypes.MethodTy);
+ for (auto MD : methods) {
+ emitMethodConstant(methodArray, MD);
+ }
+ methodArray.finishAndAddTo(values);
- llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ llvm::GlobalVariable *GV = CreateMetadataVar(prefix + name, values, section,
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
}
@@ -3634,6 +3860,21 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
}
llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
+ ConstantStructBuilder &Init,
+ StringRef Section,
+ CharUnits Align,
+ bool AddToUsed) {
+ llvm::GlobalVariable *GV =
+ Init.finishAndCreateGlobal(Name, Align, /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (!Section.empty())
+ GV->setSection(Section);
+ if (AddToUsed)
+ CGM.addCompilerUsedGlobal(GV);
+ return GV;
+}
+
+llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::Constant *Init,
StringRef Section,
CharUnits Align,
@@ -3650,6 +3891,54 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
return GV;
}
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateCStringLiteral(StringRef Name, ObjCLabelType Type,
+ bool ForceNonFragileABI,
+ bool NullTerminate) {
+ StringRef Label;
+ switch (Type) {
+ case ObjCLabelType::ClassName: Label = "OBJC_CLASS_NAME_"; break;
+ case ObjCLabelType::MethodVarName: Label = "OBJC_METH_VAR_NAME_"; break;
+ case ObjCLabelType::MethodVarType: Label = "OBJC_METH_VAR_TYPE_"; break;
+ case ObjCLabelType::PropertyName: Label = "OBJC_PROP_NAME_ATTR_"; break;
+ }
+
+ bool NonFragile = ForceNonFragileABI || isNonFragileABI();
+
+ StringRef Section;
+ switch (Type) {
+ case ObjCLabelType::ClassName:
+ Section = NonFragile ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
+ break;
+ case ObjCLabelType::MethodVarName:
+ Section = NonFragile ? "__TEXT,__objc_methname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
+ break;
+ case ObjCLabelType::MethodVarType:
+ Section = NonFragile ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
+ break;
+ case ObjCLabelType::PropertyName:
+ Section = "__TEXT,__cstring,cstring_literals";
+ break;
+ }
+
+ llvm::Constant *Value =
+ llvm::ConstantDataArray::getString(VMContext, Name, NullTerminate);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Value->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, Value, Label);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection(Section);
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ GV->setAlignment(CharUnits::One().getQuantity());
+ CGM.addCompilerUsedGlobal(GV);
+
+ return GV;
+}
+
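
A usage sketch for the new helper ("MyClass" is a hypothetical name; this mirrors the GetClassName call site further down):

    llvm::GlobalVariable *Entry =
        CreateCStringLiteral("MyClass", ObjCLabelType::ClassName);
    // On Mach-O this lands in __TEXT,__objc_classname,cstring_literals for
    // the non-fragile ABI, or __TEXT,__cstring,cstring_literals otherwise;
    // callers still GEP to element (0, 0) to get an i8* into the literal.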
llvm::Function *CGObjCMac::ModuleInitFunction() {
// Abuse this interface function as a place to finalize.
FinishModule();
@@ -4390,8 +4679,8 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
@@ -4411,8 +4700,8 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
@@ -4437,8 +4726,8 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
@@ -4456,8 +4745,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
@@ -4590,15 +4879,14 @@ static const int ModuleVersion = 7;
void CGObjCMac::EmitModuleInfo() {
uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ModuleTy);
- llvm::Constant *Values[] = {
- llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
- llvm::ConstantInt::get(ObjCTypes.LongTy, Size),
- // This used to be the filename, now it is unused. <rdr://4327263>
- GetClassName(StringRef("")),
- EmitModuleSymbols()
- };
- CreateMetadataVar("OBJC_MODULES",
- llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ModuleTy);
+ values.addInt(ObjCTypes.LongTy, ModuleVersion);
+ values.addInt(ObjCTypes.LongTy, Size);
+  // This used to be the filename; now it is unused. <rdr://4327263>
+ values.add(GetClassName(StringRef("")));
+ values.add(EmitModuleSymbols());
+ CreateMetadataVar("OBJC_MODULES", values,
"__OBJC,__module_info,regular,no_dead_strip",
CGM.getPointerAlign(), true);
}
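
What OBJC_MODULES now holds, reconstructed from the four adds above — a sketch; the runtime's own header is authoritative:

    struct objc_symtab;             // see EmitModuleSymbols below
    struct objc_module {
      long version;                 // ModuleVersion == 7
      long size;                    // sizeof(struct objc_module)
      const char *name;             // unused; the empty class-name string
      struct objc_symtab *symtab;   // EmitModuleSymbols(), possibly null
    };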
@@ -4611,15 +4899,16 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
if (!NumClasses && !NumCategories)
return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
- llvm::Constant *Values[5];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
- Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
- Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
- Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addInt(ObjCTypes.LongTy, 0);
+ values.addNullPointer(ObjCTypes.SelectorPtrTy);
+ values.addInt(ObjCTypes.ShortTy, NumClasses);
+ values.addInt(ObjCTypes.ShortTy, NumCategories);
// The runtime expects exactly the list of defined classes followed
// by the list of defined categories, in a single array.
- SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
+ auto array = values.beginArray(ObjCTypes.Int8PtrTy);
for (unsigned i=0; i<NumClasses; i++) {
const ObjCInterfaceDecl *ID = ImplementedClasses[i];
assert(ID);
@@ -4627,24 +4916,16 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
// We are implementing a weak imported interface. Give it external linkage
if (ID->isWeakImported() && !IMP->isWeakImported())
DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
-
- Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
- ObjCTypes.Int8PtrTy);
+
+ array.addBitCast(DefinedClasses[i], ObjCTypes.Int8PtrTy);
}
for (unsigned i=0; i<NumCategories; i++)
- Symbols[NumClasses + i] =
- llvm::ConstantExpr::getBitCast(DefinedCategories[i],
- ObjCTypes.Int8PtrTy);
+ array.addBitCast(DefinedCategories[i], ObjCTypes.Int8PtrTy);
- Values[4] =
- llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
- Symbols.size()),
- Symbols);
-
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ array.finishAndAddTo(values);
llvm::GlobalVariable *GV = CreateMetadataVar(
- "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip",
+ "OBJC_SYMBOLS", values, "__OBJC,__symbols,regular,no_dead_strip",
CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
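
And the matching symtab shape, per the builder calls above (again a sketch with illustrative names):

    struct objc_symtab {
      long sel_ref_cnt;     // always written as 0
      void *refs;           // SelectorPtrTy; always null here
      short cls_def_cnt;
      short cat_def_cnt;
      void *defs[1];        // classes first, then categories, as i8*
    };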
@@ -4707,12 +4988,7 @@ Address CGObjCMac::EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel) {
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
llvm::GlobalVariable *&Entry = ClassNames[RuntimeName];
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, RuntimeName),
- ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
+ Entry = CreateCStringLiteral(RuntimeName, ObjCLabelType::ClassName);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4960,14 +5236,8 @@ llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
// Null terminate the string.
buffer.push_back(0);
- bool isNonFragileABI = CGObjC.isNonFragileABI();
-
- llvm::GlobalVariable *Entry = CGObjC.CreateMetadataVar(
- "OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::get(CGM.getLLVMContext(), buffer),
- (isNonFragileABI ? "__TEXT,__objc_classname,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
+ auto *Entry = CGObjC.CreateCStringLiteral(
+ reinterpret_cast<char *>(buffer.data()), ObjCLabelType::ClassName);
return getConstantGEP(CGM.getLLVMContext(), Entry, 0, 0);
}
@@ -5062,16 +5332,9 @@ CGObjCCommonMac::BuildIvarLayout(const ObjCImplementationDecl *OMD,
llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
-
// FIXME: Avoid std::string in "Sel.getAsString()"
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_METH_VAR_NAME_",
- llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
- ((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(Sel.getAsString(), ObjCLabelType::MethodVarName);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -5085,47 +5348,27 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
-
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(TypeStr, ObjCLabelType::MethodVarType);
return getConstantGEP(VMContext, Entry, 0, 0);
}
llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
bool Extended) {
- std::string TypeStr;
- if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended))
- return nullptr;
+ std::string TypeStr =
+ CGM.getContext().getObjCEncodingForMethodDecl(D, Extended);
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
-
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(TypeStr, ObjCLabelType::MethodVarType);
return getConstantGEP(VMContext, Entry, 0, 0);
}
// FIXME: Merge into a single cstring creation function.
llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
llvm::GlobalVariable *&Entry = PropertyNames[Ident];
-
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_PROP_NAME_ATTR_",
- llvm::ConstantDataArray::getString(VMContext, Ident->getName()),
- "__TEXT,__cstring,cstring_literals", CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(Ident->getName(), ObjCLabelType::PropertyName);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -5134,8 +5377,8 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
llvm::Constant *
CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
const Decl *Container) {
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ std::string TypeStr =
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container);
return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
}
@@ -5157,20 +5400,20 @@ void CGObjCMac::FinishModule() {
// Emit the dummy bodies for any protocols which were referenced but
// never defined.
- for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
- I = Protocols.begin(), e = Protocols.end(); I != e; ++I) {
- if (I->second->hasInitializer())
+ for (auto &entry : Protocols) {
+ llvm::GlobalVariable *global = entry.second;
+ if (global->hasInitializer())
continue;
- llvm::Constant *Values[5];
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
- Values[1] = GetClassName(I->first->getName());
- Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
- Values[3] = Values[4] =
- llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
- I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
- Values));
- CGM.addCompilerUsedGlobal(I->second);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolTy);
+ values.addNullPointer(ObjCTypes.ProtocolExtensionPtrTy);
+ values.add(GetClassName(entry.first->getName()));
+ values.addNullPointer(ObjCTypes.ProtocolListPtrTy);
+ values.addNullPointer(ObjCTypes.MethodDescriptionListPtrTy);
+ values.addNullPointer(ObjCTypes.MethodDescriptionListPtrTy);
+ values.finishAndSetAsInitializer(global);
+ CGM.addCompilerUsedGlobal(global);
}
// Add assembler directives to add lazy undefined symbol references
@@ -5178,27 +5421,23 @@ void CGObjCMac::FinishModule() {
// important for correct linker interaction.
//
// FIXME: It would be nice if we had an LLVM construct for this.
- if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
+ if ((!LazySymbols.empty() || !DefinedSymbols.empty()) &&
+ CGM.getTriple().isOSBinFormatMachO()) {
SmallString<256> Asm;
Asm += CGM.getModule().getModuleInlineAsm();
if (!Asm.empty() && Asm.back() != '\n')
Asm += '\n';
llvm::raw_svector_ostream OS(Asm);
- for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
- e = DefinedSymbols.end(); I != e; ++I)
- OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
- << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
- for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
- e = LazySymbols.end(); I != e; ++I) {
- OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
- }
+ for (const auto *Sym : DefinedSymbols)
+ OS << "\t.objc_class_name_" << Sym->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << Sym->getName() << "\n";
+ for (const auto *Sym : LazySymbols)
+ OS << "\t.lazy_reference .objc_class_name_" << Sym->getName() << "\n";
+ for (const auto &Category : DefinedCategoryNames)
+ OS << "\t.objc_category_name_" << Category << "=0\n"
+ << "\t.globl .objc_category_name_" << Category << "\n";
- for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) {
- OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
- << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
- }
-
CGM.getModule().setModuleInlineAsm(OS.str());
}
}
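
For a defined class Foo, a lazily referenced class Bar, and a category entry Foo_MyCategory (all hypothetical names), the inline asm assembled above comes out roughly as:

    .objc_class_name_Foo=0
    .globl .objc_class_name_Foo
    .lazy_reference .objc_class_name_Bar
    .objc_category_name_Foo_MyCategory=0
    .globl .objc_category_name_Foo_MyCategory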
@@ -5217,10 +5456,9 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
- ShortTy = Types.ConvertType(Ctx.ShortTy);
- IntTy = Types.ConvertType(Ctx.IntTy);
- LongTy = Types.ConvertType(Ctx.LongTy);
- LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ ShortTy = cast<llvm::IntegerType>(Types.ConvertType(Ctx.ShortTy));
+ IntTy = CGM.IntTy;
+ LongTy = cast<llvm::IntegerType>(Types.ConvertType(Ctx.LongTy));
Int8PtrTy = CGM.Int8PtrTy;
Int8PtrPtrTy = CGM.Int8PtrPtrTy;
@@ -5231,9 +5469,12 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
else
IvarOffsetVarTy = LongTy;
- ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
- PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
- SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+ ObjectPtrTy =
+ cast<llvm::PointerType>(Types.ConvertType(Ctx.getObjCIdType()));
+ PtrObjectPtrTy =
+ llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy =
+ cast<llvm::PointerType>(Types.ConvertType(Ctx.getObjCSelType()));
// I'm not sure I like this. The implicit coordination is a bit
// gross. We should solve this in a reasonable fashion because this
@@ -5831,7 +6072,6 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
unsigned InstanceSize,
const ObjCImplementationDecl *ID) {
std::string ClassName = ID->getObjCRuntimeNameAsString();
- llvm::Constant *Values[10]; // 11 for 64bit targets!
CharUnits beginInstance = CharUnits::fromQuantity(InstanceStart);
CharUnits endInstance = CharUnits::fromQuantity(InstanceSize);
@@ -5842,85 +6082,84 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
else if ((hasMRCWeak = hasMRCWeakIvars(CGM, ID)))
flags |= NonFragileABI_Class_HasMRCWeakIvars;
- Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
- Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
- Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
- // FIXME. For 64bit targets add 0 here.
- Values[ 3] = (flags & NonFragileABI_Class_Meta)
- ? GetIvarLayoutName(nullptr, ObjCTypes)
- : BuildStrongIvarLayout(ID, beginInstance, endInstance);
- Values[ 4] = GetClassName(ID->getObjCRuntimeNameAsString());
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassRonfABITy);
+
+ values.addInt(ObjCTypes.IntTy, flags);
+ values.addInt(ObjCTypes.IntTy, InstanceStart);
+ values.addInt(ObjCTypes.IntTy, InstanceSize);
+ values.add((flags & NonFragileABI_Class_Meta)
+ ? GetIvarLayoutName(nullptr, ObjCTypes)
+ : BuildStrongIvarLayout(ID, beginInstance, endInstance));
+ values.add(GetClassName(ID->getObjCRuntimeNameAsString()));
+
// const struct _method_list_t * const baseMethods;
- std::vector<llvm::Constant*> Methods;
- std::string MethodListName("\01l_OBJC_$_");
+ SmallVector<const ObjCMethodDecl*, 16> methods;
if (flags & NonFragileABI_Class_Meta) {
- MethodListName += "CLASS_METHODS_";
- MethodListName += ID->getObjCRuntimeNameAsString();
- for (const auto *I : ID->class_methods())
- // Class methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
+ for (const auto *MD : ID->class_methods())
+ methods.push_back(MD);
} else {
- MethodListName += "INSTANCE_METHODS_";
- MethodListName += ID->getObjCRuntimeNameAsString();
- for (const auto *I : ID->instance_methods())
- // Instance methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
+ for (const auto *MD : ID->instance_methods())
+ methods.push_back(MD);
for (const auto *PID : ID->property_impls()) {
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
ObjCPropertyDecl *PD = PID->getPropertyDecl();
- if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- Methods.push_back(C);
- if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- Methods.push_back(C);
+ if (auto MD = PD->getGetterMethodDecl())
+ if (GetMethodDefinition(MD))
+ methods.push_back(MD);
+ if (auto MD = PD->getSetterMethodDecl())
+ if (GetMethodDefinition(MD))
+ methods.push_back(MD);
}
}
}
- Values[ 5] = EmitMethodList(MethodListName,
- "__DATA, __objc_const", Methods);
+
+ values.add(emitMethodList(ID->getObjCRuntimeNameAsString(),
+ (flags & NonFragileABI_Class_Meta)
+ ? MethodListType::ClassMethods
+ : MethodListType::InstanceMethods,
+ methods));
const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
- Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ values.add(EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ OID->getObjCRuntimeNameAsString(),
- OID->all_referenced_protocol_begin(),
- OID->all_referenced_protocol_end());
+ OID->all_referenced_protocol_begin(),
+ OID->all_referenced_protocol_end()));
if (flags & NonFragileABI_Class_Meta) {
- Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
- Values[ 8] = GetIvarLayoutName(nullptr, ObjCTypes);
- Values[ 9] = EmitPropertyList(
+ values.addNullPointer(ObjCTypes.IvarListnfABIPtrTy);
+ values.add(GetIvarLayoutName(nullptr, ObjCTypes));
+ values.add(EmitPropertyList(
"\01l_OBJC_$_CLASS_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
- ID, ID->getClassInterface(), ObjCTypes, true);
+ ID, ID->getClassInterface(), ObjCTypes, true));
} else {
- Values[ 7] = EmitIvarList(ID);
- Values[ 8] = BuildWeakIvarLayout(ID, beginInstance, endInstance,
- hasMRCWeak);
- Values[ 9] = EmitPropertyList(
+ values.add(EmitIvarList(ID));
+ values.add(BuildWeakIvarLayout(ID, beginInstance, endInstance, hasMRCWeak));
+ values.add(EmitPropertyList(
"\01l_OBJC_$_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
- ID, ID->getClassInterface(), ObjCTypes, false);
+ ID, ID->getClassInterface(), ObjCTypes, false));
}
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
- Values);
+
+ llvm::SmallString<64> roLabel;
+ llvm::raw_svector_ostream(roLabel)
+ << ((flags & NonFragileABI_Class_Meta) ? "\01l_OBJC_METACLASS_RO_$_"
+ : "\01l_OBJC_CLASS_RO_$_")
+ << ClassName;
+
llvm::GlobalVariable *CLASS_RO_GV =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- (flags & NonFragileABI_Class_Meta) ?
- std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
- std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
- CLASS_RO_GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
- CLASS_RO_GV->setSection("__DATA, __objc_const");
+ values.finishAndCreateGlobal(roLabel, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
return CLASS_RO_GV;
-
}
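
The ten adds above correspond, in order, to the non-fragile class_ro_t — a sketch with conventional field names (the deleted comments hint at an extra reserved word on 64-bit targets, which is still not emitted):

    #include <cstdint>
    struct method_list_t; struct protocol_list_t;
    struct ivar_list_t; struct property_list_t;

    struct class_ro_t {
      uint32_t flags;
      uint32_t instanceStart;
      uint32_t instanceSize;
      const uint8_t *ivarLayout;    // strong layout, or the empty layout name
      const char *name;
      const method_list_t *baseMethods;
      const protocol_list_t *baseProtocols;
      const ivar_list_t *ivars;     // null for metaclasses
      const uint8_t *weakIvarLayout;
      const property_list_t *baseProperties;
    };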
-/// BuildClassMetaData - This routine defines that to-level meta-data
-/// for the given ClassName for:
+/// Build the metaclass object for a class.
+///
/// struct _class_t {
/// struct _class_t *isa;
/// struct _class_t * const superclass;
@@ -5929,28 +6168,33 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
/// struct class_ro_t *ro;
/// }
///
-llvm::GlobalVariable *CGObjCNonFragileABIMac::BuildClassMetaData(
- const std::string &ClassName, llvm::Constant *IsAGV, llvm::Constant *SuperClassGV,
- llvm::Constant *ClassRoGV, bool HiddenVisibility, bool Weak) {
- llvm::Constant *Values[] = {
- IsAGV,
- SuperClassGV,
- ObjCEmptyCacheVar, // &ObjCEmptyCacheVar
- ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
- ClassRoGV // &CLASS_RO_GV
- };
- if (!Values[1])
- Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
- if (!Values[3])
- Values[3] = llvm::Constant::getNullValue(
- llvm::PointerType::getUnqual(ObjCTypes.ImpnfABITy));
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
- Values);
- llvm::GlobalVariable *GV = GetClassGlobal(ClassName, Weak);
- GV->setInitializer(Init);
- GV->setSection("__DATA, __objc_data");
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::BuildClassObject(const ObjCInterfaceDecl *CI,
+ bool isMetaclass,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassnfABITy);
+ values.add(IsAGV);
+ if (SuperClassGV) {
+ values.add(SuperClassGV);
+ } else {
+ values.addNullPointer(ObjCTypes.ClassnfABIPtrTy);
+ }
+ values.add(ObjCEmptyCacheVar);
+ values.add(ObjCEmptyVtableVar);
+ values.add(ClassRoGV);
+
+ llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>(GetClassGlobal(CI, isMetaclass, ForDefinition));
+ values.finishAndSetAsInitializer(GV);
+
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_data");
GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
if (!CGM.getTriple().isOSBinFormatCOFF())
if (HiddenVisibility)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
@@ -6014,6 +6258,9 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ImpnfABITy, false,
llvm::GlobalValue::ExternalLinkage, nullptr,
"_objc_empty_vtable");
+ else
+ ObjCEmptyVtableVar =
+ llvm::ConstantPointerNull::get(ObjCTypes.ImpnfABITy->getPointerTo());
}
// FIXME: Is this correct (that meta class size is never computed)?
@@ -6022,9 +6269,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
uint32_t InstanceSize = InstanceStart;
uint32_t flags = NonFragileABI_Class_Meta;
- llvm::GlobalVariable *SuperClassGV, *IsAGV;
+ llvm::Constant *SuperClassGV, *IsAGV;
- StringRef ClassName = ID->getObjCRuntimeNameAsString();
const auto *CI = ID->getClassInterface();
assert(CI && "CGObjCNonFragileABIMac::GenerateClass - class is 0");
@@ -6047,17 +6293,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
// class is root
flags |= NonFragileABI_Class_Root;
- SuperClassGV = GetClassGlobal((getClassSymbolPrefix() + ClassName).str(),
- CI->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (CI->hasAttr<DLLImportAttr>())
- SuperClassGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
-
- IsAGV = GetClassGlobal((getMetaclassSymbolPrefix() + ClassName).str(),
- CI->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (CI->hasAttr<DLLImportAttr>())
- IsAGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ SuperClassGV = GetClassGlobal(CI, /*metaclass*/ false, NotForDefinition);
+ IsAGV = GetClassGlobal(CI, /*metaclass*/ true, NotForDefinition);
} else {
// Has a root. Current class is not a root.
const ObjCInterfaceDecl *Root = ID->getClassInterface();
@@ -6065,31 +6302,16 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
Root = Super;
const auto *Super = CI->getSuperClass();
- StringRef RootClassName = Root->getObjCRuntimeNameAsString();
- StringRef SuperClassName = Super->getObjCRuntimeNameAsString();
-
- IsAGV = GetClassGlobal((getMetaclassSymbolPrefix() + RootClassName).str(),
- Root->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (Root->hasAttr<DLLImportAttr>())
- IsAGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
-
- // work on super class metadata symbol.
- SuperClassGV =
- GetClassGlobal((getMetaclassSymbolPrefix() + SuperClassName).str(),
- Super->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (Super->hasAttr<DLLImportAttr>())
- SuperClassGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ IsAGV = GetClassGlobal(Root, /*metaclass*/ true, NotForDefinition);
+ SuperClassGV = GetClassGlobal(Super, /*metaclass*/ true, NotForDefinition);
}
llvm::GlobalVariable *CLASS_RO_GV =
BuildClassRoTInitializer(flags, InstanceStart, InstanceSize, ID);
llvm::GlobalVariable *MetaTClass =
- BuildClassMetaData((getMetaclassSymbolPrefix() + ClassName).str(), IsAGV,
- SuperClassGV, CLASS_RO_GV, classIsHidden,
- CI->isWeakImported());
+ BuildClassObject(CI, /*metaclass*/ true,
+ IsAGV, SuperClassGV, CLASS_RO_GV, classIsHidden);
if (CGM.getTriple().isOSBinFormatCOFF())
if (CI->hasAttr<DLLExportAttr>())
MetaTClass->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
@@ -6122,14 +6344,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
} else {
// Has a root. Current class is not a root.
const auto *Super = CI->getSuperClass();
- StringRef SuperClassName = Super->getObjCRuntimeNameAsString();
-
- SuperClassGV =
- GetClassGlobal((getClassSymbolPrefix() + SuperClassName).str(),
- Super->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (Super->hasAttr<DLLImportAttr>())
- SuperClassGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ SuperClassGV = GetClassGlobal(Super, /*metaclass*/ false, NotForDefinition);
}
GetClassSizeInfo(ID, InstanceStart, InstanceSize);
@@ -6137,9 +6352,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
BuildClassRoTInitializer(flags, InstanceStart, InstanceSize, ID);
llvm::GlobalVariable *ClassMD =
- BuildClassMetaData((getClassSymbolPrefix() + ClassName).str(), MetaTClass,
- SuperClassGV, CLASS_RO_GV, classIsHidden,
- CI->isWeakImported());
+ BuildClassObject(CI, /*metaclass*/ false,
+ MetaTClass, SuperClassGV, CLASS_RO_GV, classIsHidden);
if (CGM.getTriple().isOSBinFormatCOFF())
if (CI->hasAttr<DLLExportAttr>())
ClassMD->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
@@ -6152,7 +6366,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Force the definition of the EHType if necessary.
if (flags & NonFragileABI_Class_Exception)
- GetInterfaceEHType(CI, true);
+ (void) GetInterfaceEHType(CI, ForDefinition);
// Make sure method definition entries are all clear for next implementation.
MethodDefinitions.clear();
}
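
How the two BuildClassObject calls wire up isa and superclass, per the branches above — illustrative, for a non-root class Foo : Bar with root class Root:

    // metaclass(Foo).isa        = metaclass(Root)   // IsAGV
    // metaclass(Foo).superclass = metaclass(Bar)    // SuperClassGV
    // class(Foo).isa            = metaclass(Foo)    // MetaTClass
    // class(Foo).superclass     = class(Bar)
    // For a root class, the metaclass's isa is the class's own metaclass
    // and its superclass is the class object itself.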
@@ -6217,82 +6431,59 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
ExtCatName += "_$_";
ExtCatName += OCD->getNameAsString();
- llvm::SmallString<64> ExtClassName(getClassSymbolPrefix());
- ExtClassName += Interface->getObjCRuntimeNameAsString();
-
- llvm::Constant *Values[8];
- Values[0] = GetClassName(OCD->getIdentifier()->getName());
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.CategorynfABITy);
+ values.add(GetClassName(OCD->getIdentifier()->getName()));
// meta-class entry symbol
- llvm::GlobalVariable *ClassGV =
- GetClassGlobal(ExtClassName.str(), Interface->isWeakImported());
+ values.add(GetClassGlobal(Interface, /*metaclass*/ false, NotForDefinition));
+ std::string listName =
+ (Interface->getObjCRuntimeNameAsString() + "_$_" + OCD->getName()).str();
+
+ SmallVector<const ObjCMethodDecl *, 16> instanceMethods;
+ SmallVector<const ObjCMethodDecl *, 8> classMethods;
+ for (const auto *MD : OCD->methods()) {
+ if (MD->isInstanceMethod()) {
+ instanceMethods.push_back(MD);
+ } else {
+ classMethods.push_back(MD);
+ }
+ }
+
+ values.add(emitMethodList(listName, MethodListType::CategoryInstanceMethods,
+ instanceMethods));
+ values.add(emitMethodList(listName, MethodListType::CategoryClassMethods,
+ classMethods));
- Values[1] = ClassGV;
- std::vector<llvm::Constant*> Methods;
- llvm::SmallString<64> MethodListName(Prefix);
-
- MethodListName += "INSTANCE_METHODS_";
- MethodListName += Interface->getObjCRuntimeNameAsString();
- MethodListName += "_$_";
- MethodListName += OCD->getName();
-
- for (const auto *I : OCD->instance_methods())
- // Instance methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
-
- Values[2] = EmitMethodList(MethodListName.str(),
- "__DATA, __objc_const",
- Methods);
-
- MethodListName = Prefix;
- MethodListName += "CLASS_METHODS_";
- MethodListName += Interface->getObjCRuntimeNameAsString();
- MethodListName += "_$_";
- MethodListName += OCD->getNameAsString();
-
- Methods.clear();
- for (const auto *I : OCD->class_methods())
- // Class methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
-
- Values[3] = EmitMethodList(MethodListName.str(),
- "__DATA, __objc_const",
- Methods);
const ObjCCategoryDecl *Category =
Interface->FindCategoryDeclaration(OCD->getIdentifier());
if (Category) {
SmallString<256> ExtName;
llvm::raw_svector_ostream(ExtName) << Interface->getObjCRuntimeNameAsString() << "_$_"
<< OCD->getName();
- Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ values.add(EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ Interface->getObjCRuntimeNameAsString() + "_$_"
+ Category->getName(),
- Category->protocol_begin(),
- Category->protocol_end());
- Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, false);
- Values[6] = EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, true);
+ Category->protocol_begin(),
+ Category->protocol_end()));
+ values.add(EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, false));
+ values.add(EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, true));
} else {
- Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
- Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ values.addNullPointer(ObjCTypes.ProtocolListnfABIPtrTy);
+ values.addNullPointer(ObjCTypes.PropertyListPtrTy);
+ values.addNullPointer(ObjCTypes.PropertyListPtrTy);
}
unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategorynfABITy);
- Values[7] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
-
- llvm::Constant *Init =
- llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
- Values);
- llvm::GlobalVariable *GCATV
- = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
- false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- ExtCatName.str());
- GCATV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
- GCATV->setSection("__DATA, __objc_const");
+ values.addInt(ObjCTypes.IntTy, Size);
+
+ llvm::GlobalVariable *GCATV =
+ values.finishAndCreateGlobal(ExtCatName.str(), CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GCATV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GCATV);
DefinedCategories.push_back(GCATV);
@@ -6303,25 +6494,37 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
MethodDefinitions.clear();
}
-/// GetMethodConstant - Return a struct objc_method constant for the
-/// given method if it has been defined. The result is null if the
-/// method has not been defined. The return value has type MethodPtrTy.
-llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
- const ObjCMethodDecl *MD) {
- llvm::Function *Fn = GetMethodDefinition(MD);
- if (!Fn)
- return nullptr;
-
- llvm::Constant *Method[] = {
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy),
- GetMethodVarType(MD),
- llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
- };
- return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+/// emitMethodConstant - Return a struct objc_method constant. If
+/// forProtocol is true, the implementation will be null; otherwise,
+/// the method must have a definition registered with the runtime.
+///
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+void CGObjCNonFragileABIMac::emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD,
+ bool forProtocol) {
+ auto method = builder.beginStruct(ObjCTypes.MethodTy);
+ method.addBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ method.add(GetMethodVarType(MD));
+
+ if (forProtocol) {
+    // Protocol methods have no implementation, so this entry is always NULL.
+ method.addNullPointer(ObjCTypes.Int8PtrTy);
+ } else {
+ llvm::Function *fn = GetMethodDefinition(MD);
+ assert(fn && "no definition for method?");
+ method.addBitCast(fn, ObjCTypes.Int8PtrTy);
+ }
+
+ method.finishAndAddTo(builder);
}
-/// EmitMethodList - Build meta-data for method declarations
+/// Build meta-data for method declarations.
+///
/// struct _method_list_t {
/// uint32_t entsize; // sizeof(struct _objc_method)
/// uint32_t method_count;
@@ -6329,28 +6532,69 @@ llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
/// }
///
llvm::Constant *
-CGObjCNonFragileABIMac::EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods) {
+CGObjCNonFragileABIMac::emitMethodList(Twine name, MethodListType kind,
+ ArrayRef<const ObjCMethodDecl *> methods) {
// Return null for empty list.
- if (Methods.empty())
+ if (methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
- llvm::Constant *Values[3];
+ StringRef prefix;
+ bool forProtocol;
+ switch (kind) {
+ case MethodListType::CategoryInstanceMethods:
+ prefix = "\01l_OBJC_$_CATEGORY_INSTANCE_METHODS_";
+ forProtocol = false;
+ break;
+ case MethodListType::CategoryClassMethods:
+ prefix = "\01l_OBJC_$_CATEGORY_CLASS_METHODS_";
+ forProtocol = false;
+ break;
+ case MethodListType::InstanceMethods:
+ prefix = "\01l_OBJC_$_INSTANCE_METHODS_";
+ forProtocol = false;
+ break;
+ case MethodListType::ClassMethods:
+ prefix = "\01l_OBJC_$_CLASS_METHODS_";
+ forProtocol = false;
+ break;
+
+ case MethodListType::ProtocolInstanceMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_";
+ forProtocol = true;
+ break;
+ case MethodListType::ProtocolClassMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_CLASS_METHODS_";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolInstanceMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolClassMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_";
+ forProtocol = true;
+ break;
+ }
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+
// sizeof(struct _objc_method)
unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.MethodTy);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ values.addInt(ObjCTypes.IntTy, Size);
// method_count
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
- Methods.size());
- Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ values.addInt(ObjCTypes.IntTy, methods.size());
+ auto methodArray = values.beginArray(ObjCTypes.MethodTy);
+ for (auto MD : methods) {
+ emitMethodConstant(methodArray, MD, forProtocol);
+ }
+ methodArray.finishAndAddTo(values);
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::PrivateLinkage, Init, Name);
- GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
- GV->setSection(Section);
+ auto *GV = values.finishAndCreateGlobal(prefix + name, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
}
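
For example, a method list built with MethodListType::CategoryInstanceMethods for a hypothetical category Extras on class Foo would be named:

    \01l_OBJC_$_CATEGORY_INSTANCE_METHODS_Foo_$_Extras

where the \01 prefix tells LLVM to emit the symbol name verbatim, without the platform's usual global-symbol prefix.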
@@ -6406,7 +6650,8 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
}
- IvarOffsetGV->setSection("__DATA, __objc_ivar");
+ if (CGM.getTriple().isOSBinFormatMachO())
+ IvarOffsetGV->setSection("__DATA, __objc_ivar");
return IvarOffsetGV;
}
@@ -6430,7 +6675,12 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
const ObjCImplementationDecl *ID) {
- std::vector<llvm::Constant*> Ivars;
+ ConstantInitBuilder builder(CGM);
+ auto ivarList = builder.beginStruct();
+ ivarList.addInt(ObjCTypes.IntTy,
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy));
+ auto ivarCountSlot = ivarList.addPlaceholder();
+ auto ivars = ivarList.beginArray(ObjCTypes.IvarnfABITy);
const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
@@ -6442,48 +6692,45 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
- llvm::Constant *Ivar[5];
- Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
- ComputeIvarBaseOffset(CGM, ID, IVD));
- Ivar[1] = GetMethodVarName(IVD->getIdentifier());
- Ivar[2] = GetMethodVarType(IVD);
+
+ auto ivar = ivars.beginStruct(ObjCTypes.IvarnfABITy);
+ ivar.add(EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD)));
+ ivar.add(GetMethodVarName(IVD->getIdentifier()));
+ ivar.add(GetMethodVarType(IVD));
llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(IVD->getType());
unsigned Size = CGM.getDataLayout().getTypeAllocSize(FieldTy);
unsigned Align = CGM.getContext().getPreferredTypeAlign(
IVD->getType().getTypePtr()) >> 3;
Align = llvm::Log2_32(Align);
- Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+ ivar.addInt(ObjCTypes.IntTy, Align);
    // NOTE. Size of a bitfield does not match gcc's, because of the
    // way bitfields are treated specially in each. But I am told that
    // 'size' for bitfield ivars is ignored by the runtime so it does
    // not matter. If it matters, there is enough info to get the
    // bitfield right!
- Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ ivar.addInt(ObjCTypes.IntTy, Size);
+ ivar.finishAndAddTo(ivars);
}
// Return null for empty list.
- if (Ivars.empty())
+ if (ivars.empty()) {
+ ivars.abandon();
+ ivarList.abandon();
return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ }
+
+ auto ivarCount = ivars.size();
+ ivars.finishAndAddTo(ivarList);
+ ivarList.fillPlaceholderWithInt(ivarCountSlot, ObjCTypes.IntTy, ivarCount);
- llvm::Constant *Values[3];
- unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
- Ivars.size());
- Values[2] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- Prefix + OID->getObjCRuntimeNameAsString());
- GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(Init->getType()));
- GV->setSection("__DATA, __objc_const");
-
+ ivarList.finishAndCreateGlobal(Prefix + OID->getObjCRuntimeNameAsString(),
+ CGM.getPointerAlign(), /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
}
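
The placeholder dance above is the general ConstantInitBuilder idiom for a length-prefixed array whose count is only known after filling it in — a minimal sketch, assuming a CodeGenModule &CGM, an llvm::Type *elemTy, and this file's existing includes:

    ConstantInitBuilder builder(CGM);
    auto list = builder.beginStruct();
    auto countSlot = list.addPlaceholder();   // reserve the count field
    auto elems = list.beginArray(elemTy);
    // ... append elements; or abandon() both builders if there are none ...
    auto count = elems.size();
    elems.finishAndAddTo(list);
    list.fillPlaceholderWithInt(countSlot, CGM.Int32Ty, count);
    llvm::GlobalVariable *GV =
        list.finishAndCreateGlobal("hypothetical_name", CGM.getPointerAlign(),
                                   /*constant*/ false,
                                   llvm::GlobalValue::PrivateLinkage);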
@@ -6492,15 +6739,20 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
const ObjCProtocolDecl *PD) {
llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
- if (!Entry)
+ if (!Entry) {
// We use the initializer as a marker of whether this is a forward
// reference or not. At module finalization we add the empty
// contents for protocols which were referenced but never defined.
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
- false, llvm::GlobalValue::ExternalLinkage,
- nullptr,
- "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
+ llvm::SmallString<64> Protocol;
+ llvm::raw_svector_ostream(Protocol) << "\01l_OBJC_PROTOCOL_$_"
+ << PD->getObjCRuntimeNameAsString();
+
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ nullptr, Protocol);
+ if (!CGM.getTriple().isOSBinFormatMachO())
+ Entry->setComdat(CGM.getModule().getOrInsertComdat(Protocol));
+ }
return Entry;
}
@@ -6537,96 +6789,59 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
if (const ObjCProtocolDecl *Def = PD->getDefinition())
PD = Def;
- // Construct method lists.
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
- std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
- std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
- for (const auto *MD : PD->instance_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptInstanceMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- InstanceMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
-
- for (const auto *MD : PD->class_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptClassMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- ClassMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
+ auto methodLists = ProtocolMethodLists::get(PD);
- MethodTypesExt.insert(MethodTypesExt.end(),
- OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolnfABITy);
- llvm::Constant *Values[13];
// isa is NULL
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
- Values[1] = GetClassName(PD->getObjCRuntimeNameAsString());
- Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getObjCRuntimeNameAsString(),
+ values.addNullPointer(ObjCTypes.ObjectPtrTy);
+ values.add(GetClassName(PD->getObjCRuntimeNameAsString()));
+ values.add(EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_"
+ + PD->getObjCRuntimeNameAsString(),
PD->protocol_begin(),
- PD->protocol_end());
-
- Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- InstanceMethods);
- Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- ClassMethods);
- Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- OptInstanceMethods);
- Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- OptClassMethods);
- Values[7] = EmitPropertyList(
- "\01l_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
- nullptr, PD, ObjCTypes, false);
+ PD->protocol_end()));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredInstanceMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredClassMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalInstanceMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalClassMethods));
+ values.add(EmitPropertyList(
+ "\01l_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
+ nullptr, PD, ObjCTypes, false));
uint32_t Size =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
- Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
- Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ values.addInt(ObjCTypes.IntTy, Size);
+ values.addInt(ObjCTypes.IntTy, 0);
+ values.add(EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ PD->getObjCRuntimeNameAsString(),
- MethodTypesExt, ObjCTypes);
+ methodLists.emitExtendedTypesArray(this),
+ ObjCTypes));
+
// const char *demangledName;
- Values[11] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ values.addNullPointer(ObjCTypes.Int8PtrTy);
- Values[12] = EmitPropertyList(
+ values.add(EmitPropertyList(
"\01l_OBJC_$_CLASS_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
- nullptr, PD, ObjCTypes, true);
+ nullptr, PD, ObjCTypes, true));
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
- Values);
-
if (Entry) {
// Already created, fix the linkage and update the initializer.
Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
- Entry->setInitializer(Init);
+ values.finishAndSetAsInitializer(Entry);
} else {
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
- false, llvm::GlobalValue::WeakAnyLinkage, Init,
- "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ llvm::SmallString<64> symbolName;
+ llvm::raw_svector_ostream(symbolName)
+ << "\01l_OBJC_PROTOCOL_$_" << PD->getObjCRuntimeNameAsString();
+
+ Entry = values.finishAndCreateGlobal(symbolName, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage);
+ if (!CGM.getTriple().isOSBinFormatMachO())
+ Entry->setComdat(CGM.getModule().getOrInsertComdat(symbolName));
Protocols[PD->getIdentifier()] = Entry;
}
@@ -6635,13 +6850,20 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
// Use this protocol meta-data to build protocol list table in section
// __DATA, __objc_protolist
+ llvm::SmallString<64> ProtocolRef;
+ llvm::raw_svector_ostream(ProtocolRef) << "\01l_OBJC_LABEL_PROTOCOL_$_"
+ << PD->getObjCRuntimeNameAsString();
+
llvm::GlobalVariable *PTGV =
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
false, llvm::GlobalValue::WeakAnyLinkage, Entry,
- "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
+ ProtocolRef);
+ if (!CGM.getTriple().isOSBinFormatMachO())
+ PTGV->setComdat(CGM.getModule().getOrInsertComdat(ProtocolRef));
PTGV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
- PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ if (CGM.getTriple().isOSBinFormatMachO())
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.addCompilerUsedGlobal(PTGV);
return Entry;
@@ -6673,55 +6895,30 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
if (GV)
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
- for (; begin != end; ++begin)
- ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ auto countSlot = values.addPlaceholder();
- // This list is null terminated.
- ProtocolRefs.push_back(llvm::Constant::getNullValue(
- ObjCTypes.ProtocolnfABIPtrTy));
-
- llvm::Constant *Values[2];
- Values[0] =
- llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
- Values[1] =
- llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
- ProtocolRefs.size()),
- ProtocolRefs);
-
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::PrivateLinkage,
- Init, Name);
- GV->setSection("__DATA, __objc_const");
- GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(Init->getType()));
+ // A null-terminated array of protocols.
+ auto array = values.beginArray(ObjCTypes.ProtocolnfABIPtrTy);
+ for (; begin != end; ++begin)
+ array.add(GetProtocolRef(*begin)); // Implemented???
+ auto count = array.size();
+ array.addNullPointer(ObjCTypes.ProtocolnfABIPtrTy);
+
+ array.finishAndAddTo(values);
+ values.fillPlaceholderWithInt(countSlot, ObjCTypes.LongTy, count);
+
+ GV = values.finishAndCreateGlobal(Name, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.ProtocolListnfABIPtrTy);
}
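
The emitted list is both counted and null-terminated; note the count placeholder is filled with the array size taken before the terminator is appended. A sketch of the resulting shape:

    struct protocol_t;
    struct protocol_list_t {
      long count;            // LongTy; excludes the trailing null
      protocol_t *list[1];   // 'count' entries plus a null terminator
    };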
-/// GetMethodDescriptionConstant - This routine build following meta-data:
-/// struct _objc_method {
-/// SEL _cmd;
-/// char *method_type;
-/// char *_imp;
-/// }
-
-llvm::Constant *
-CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- llvm::Constant *Desc[3];
- Desc[0] =
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Desc[1] = GetMethodVarType(MD);
- if (!Desc[1])
- return nullptr;
-
- // Protocol methods have no implementation. So, this entry is always NULL.
- Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
-}
-
/// EmitObjCValueForIvar - Code Gen for nonfragile ivar reference.
/// This code gen. amounts to generating code for:
/// @code
@@ -6853,16 +7050,15 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
= CGM.getModule().getGlobalVariable(messageRefName);
if (!messageRef) {
// Build the message ref structure.
- llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
- llvm::Constant *init = llvm::ConstantStruct::getAnon(values);
- messageRef = new llvm::GlobalVariable(CGM.getModule(),
- init->getType(),
- /*constant*/ false,
- llvm::GlobalValue::WeakAnyLinkage,
- init,
- messageRefName);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.add(fn);
+ values.add(GetMethodVarName(selector));
+ messageRef = values.finishAndCreateGlobal(messageRefName,
+ CharUnits::fromQuantity(16),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage);
messageRef->setVisibility(llvm::GlobalValue::HiddenVisibility);
- messageRef->setAlignment(16);
messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
}
@@ -6887,9 +7083,10 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
// Load the function to call from the message ref table.
Address calleeAddr =
CGF.Builder.CreateStructGEP(mref, 0, CharUnits::Zero());
- llvm::Value *callee = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
+ llvm::Value *calleePtr = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
- callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
+ calleePtr = CGF.Builder.CreateBitCast(calleePtr, MSI.MessengerType);
+ CGCallee callee(CGCalleeInfo(), calleePtr);
RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
return nullReturn.complete(CGF, result, resultType, formalArgs,
@@ -6916,33 +7113,59 @@ CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
false, CallArgs, Method, Class, ObjCTypes);
}
-llvm::GlobalVariable *
-CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name, bool Weak) {
+llvm::Constant *
+CGObjCNonFragileABIMac::GetClassGlobal(const ObjCInterfaceDecl *ID,
+ bool metaclass,
+ ForDefinition_t isForDefinition) {
+ auto prefix =
+ (metaclass ? getMetaclassSymbolPrefix() : getClassSymbolPrefix());
+ return GetClassGlobal((prefix + ID->getObjCRuntimeNameAsString()).str(),
+ isForDefinition,
+ ID->isWeakImported(),
+ !isForDefinition
+ && CGM.getTriple().isOSBinFormatCOFF()
+ && ID->hasAttr<DLLImportAttr>());
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
+ ForDefinition_t IsForDefinition,
+ bool Weak, bool DLLImport) {
llvm::GlobalValue::LinkageTypes L =
Weak ? llvm::GlobalValue::ExternalWeakLinkage
: llvm::GlobalValue::ExternalLinkage;
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (!GV)
+
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (!GV) {
GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
false, L, nullptr, Name);
+ if (DLLImport)
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ }
+
assert(GV->getLinkage() == L);
return GV;
}
-llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
- IdentifierInfo *II,
- bool Weak,
- const ObjCInterfaceDecl *ID) {
+llvm::Value *
+CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
+ IdentifierInfo *II,
+ const ObjCInterfaceDecl *ID) {
CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = ClassReferences[II];
if (!Entry) {
- StringRef Name = ID ? ID->getObjCRuntimeNameAsString() : II->getName();
- std::string ClassName = (getClassSymbolPrefix() + Name).str();
- llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, Weak);
+ llvm::Constant *ClassGV;
+ if (ID) {
+ ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
+ } else {
+ ClassGV = GetClassGlobal((getClassSymbolPrefix() + II->getName()).str(),
+ NotForDefinition);
+ }
+
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
@@ -6960,13 +7183,13 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
if (ID->hasAttr<ObjCRuntimeVisibleAttr>())
return EmitClassRefViaRuntime(CGF, ID, ObjCTypes);
- return EmitClassRefFromId(CGF, ID->getIdentifier(), ID->isWeakImported(), ID);
+ return EmitClassRefFromId(CGF, ID->getIdentifier(), ID);
}
llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
CodeGenFunction &CGF) {
IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
- return EmitClassRefFromId(CGF, II, false, nullptr);
+ return EmitClassRefFromId(CGF, II, nullptr);
}
llvm::Value *
@@ -6976,10 +7199,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
if (!Entry) {
- llvm::SmallString<64> ClassName(getClassSymbolPrefix());
- ClassName += ID->getObjCRuntimeNameAsString();
- llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(),
- ID->isWeakImported());
+ auto ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
@@ -6999,10 +7219,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
if (!Entry) {
- llvm::SmallString<64> MetaClassName(getMetaclassSymbolPrefix());
- MetaClassName += ID->getObjCRuntimeNameAsString();
- llvm::GlobalVariable *MetaClassGV =
- GetClassGlobal(MetaClassName.str(), Weak);
+ auto MetaClassGV = GetClassGlobal(ID, /*metaclass*/ true, NotForDefinition);
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
@@ -7021,11 +7238,10 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
llvm::Value *CGObjCNonFragileABIMac::GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) {
if (ID->isWeakImported()) {
- llvm::SmallString<64> ClassName(getClassSymbolPrefix());
- ClassName += ID->getObjCRuntimeNameAsString();
- llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(), true);
+ auto ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
(void)ClassGV;
- assert(ClassGV->hasExternalWeakLinkage());
+ assert(!isa<llvm::GlobalVariable>(ClassGV) ||
+ cast<llvm::GlobalVariable>(ClassGV)->hasExternalWeakLinkage());
}
return EmitClassRef(CGF, ID);
@@ -7258,7 +7474,7 @@ CGObjCNonFragileABIMac::GetEHType(QualType T) {
const ObjCInterfaceType *IT = PT->getInterfaceType();
assert(IT && "Invalid @catch type.");
- return GetInterfaceEHType(IT->getDecl(), false);
+ return GetInterfaceEHType(IT->getDecl(), NotForDefinition);
}
void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -7290,13 +7506,13 @@ void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
llvm::Constant *
CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
- bool ForDefinition) {
+ ForDefinition_t IsForDefinition) {
llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
StringRef ClassName = ID->getObjCRuntimeNameAsString();
// If we don't need a definition, return the entry if found or check
// if we use an external reference.
- if (!ForDefinition) {
+ if (!IsForDefinition) {
if (Entry)
return Entry;
@@ -7332,23 +7548,24 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
}
llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2);
- llvm::Constant *Values[] = {
- llvm::ConstantExpr::getGetElementPtr(VTableGV->getValueType(), VTableGV,
- VTableIdx),
- GetClassName(ID->getObjCRuntimeNameAsString()),
- GetClassGlobal((getClassSymbolPrefix() + ClassName).str()),
- };
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
-
- llvm::GlobalValue::LinkageTypes L = ForDefinition
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.EHTypeTy);
+ values.add(llvm::ConstantExpr::getGetElementPtr(VTableGV->getValueType(),
+ VTableGV, VTableIdx));
+ values.add(GetClassName(ClassName));
+ values.add(GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition));
+
+ llvm::GlobalValue::LinkageTypes L = IsForDefinition
? llvm::GlobalValue::ExternalLinkage
: llvm::GlobalValue::WeakAnyLinkage;
if (Entry) {
- Entry->setInitializer(Init);
+ values.finishAndSetAsInitializer(Entry);
+ Entry->setAlignment(CGM.getPointerAlign().getQuantity());
} else {
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false, L,
- Init, ("OBJC_EHTYPE_$_" + ClassName).str());
+ Entry = values.finishAndCreateGlobal("OBJC_EHTYPE_$_" + ClassName,
+ CGM.getPointerAlign(),
+ /*constant*/ false,
+ L);
if (CGM.getTriple().isOSBinFormatCOFF())
if (hasObjCExceptionAttribute(CGM.getContext(), ID))
if (ID->hasAttr<DLLExportAttr>())
@@ -7360,11 +7577,9 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
if (ID->getVisibility() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
- const auto &DL = CGM.getDataLayout();
- Entry->setAlignment(DL.getABITypeAlignment(ObjCTypes.EHTypeTy));
-
- if (ForDefinition)
- Entry->setSection("__DATA,__objc_const");
+ if (IsForDefinition)
+ if (CGM.getTriple().isOSBinFormatMachO())
+ Entry->setSection("__DATA,__objc_const");
return Entry;
}
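
The hunk above replaces the manual ConstantStruct::get/GlobalVariable pair with ConstantInitBuilder. A minimal sketch of the pattern, using only the builder methods that appear in this patch (SomeStructTy and the field values are placeholders):

    ConstantInitBuilder builder(CGM);
    auto values = builder.beginStruct(SomeStructTy); // SomeStructTy: assumed
    values.add(SomePointerConstant);                 // pointer-sized field
    values.addInt(CGM.Int32Ty, 0);                   // integer field
    llvm::GlobalVariable *GV =
        values.finishAndCreateGlobal("some.global", CGM.getPointerAlign(),
                                     /*constant=*/true,
                                     llvm::GlobalValue::PrivateLinkage);
    // An existing declaration can instead be filled in with
    // values.finishAndSetAsInitializer(GV);
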
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 0caf6d9f210a..3da7ed230edd 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -90,7 +90,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- QualType IvarTy = Ivar->getType();
+ QualType IvarTy = Ivar->getType().withCVRQualifiers(CVRQualifiers);
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
@@ -98,7 +98,6 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
if (!Ivar->isBitField()) {
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
- LV.getQuals().addCVRQualifiers(CVRQualifiers);
return LV;
}
@@ -139,9 +138,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
Addr = CGF.Builder.CreateElementBitCast(Addr,
llvm::Type::getIntNTy(CGF.getLLVMContext(),
Info->StorageSize));
- return LValue::MakeBitfield(Addr, *Info,
- IvarTy.withCVRQualifiers(CVRQualifiers),
- AlignmentSource::Decl);
+ return LValue::MakeBitfield(Addr, *Info, IvarTy, AlignmentSource::Decl);
}
namespace {
@@ -153,18 +150,16 @@ namespace {
};
struct CallObjCEndCatch final : EHScopeStack::Cleanup {
- CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
- MightThrow(MightThrow), Fn(Fn) {}
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn)
+ : MightThrow(MightThrow), Fn(Fn) {}
bool MightThrow;
llvm::Value *Fn;
void Emit(CodeGenFunction &CGF, Flags flags) override {
- if (!MightThrow) {
- CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
- return;
- }
-
- CGF.EmitRuntimeCallOrInvoke(Fn);
+ if (MightThrow)
+ CGF.EmitRuntimeCallOrInvoke(Fn);
+ else
+ CGF.EmitNounwindRuntimeCall(Fn);
}
};
}
@@ -233,10 +228,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Enter the catch.
llvm::Value *Exn = RawExn;
- if (beginCatchFn) {
- Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
- cast<llvm::CallInst>(Exn)->setDoesNotThrow();
- }
+ if (beginCatchFn)
+ Exn = CGF.EmitNounwindRuntimeCall(beginCatchFn, RawExn, "exn.adjusted");
CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
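
EmitNounwindRuntimeCall folds the call-plus-attribute idiom into one helper; the two forms below are intended to emit the same IR (sketch only, taken from the shapes visible in these hunks):

    // before:
    llvm::CallInst *Call = CGF.Builder.CreateCall(Fn);
    Call->setDoesNotThrow();
    // after:
    CGF.EmitNounwindRuntimeCall(Fn);
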
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 6c330590f7cd..a14b44abf413 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -280,9 +280,6 @@ public:
virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
QualType T) = 0;
- virtual llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) = 0;
-
struct MessageSendInfo {
const CGFunctionInfo &CallInfo;
llvm::PointerType *MessengerType;
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index 38aebea18ed3..9062936fdd14 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -15,6 +15,7 @@
#include "CGOpenCLRuntime.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include <assert.h>
@@ -34,10 +35,10 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
"Not an OpenCL specific type!");
llvm::LLVMContext& Ctx = CGM.getLLVMContext();
- uint32_t ImgAddrSpc =
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
+ uint32_t ImgAddrSpc = CGM.getContext().getTargetAddressSpace(
+ CGM.getTarget().getOpenCLImageAddrSpace());
switch (cast<BuiltinType>(T)->getKind()) {
- default:
+ default:
llvm_unreachable("Unexpected opencl builtin type!");
return nullptr;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
@@ -47,7 +48,7 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
ImgAddrSpc);
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
- return llvm::IntegerType::get(Ctx, 32);
+ return getSamplerType();
case BuiltinType::OCLEvent:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.event_t"), 0);
@@ -76,3 +77,32 @@ llvm::Type *CGOpenCLRuntime::getPipeType() {
return PipeTy;
}
+
+llvm::PointerType *CGOpenCLRuntime::getSamplerType() {
+ if (!SamplerTy)
+ SamplerTy = llvm::PointerType::get(llvm::StructType::create(
+ CGM.getLLVMContext(), "opencl.sampler_t"),
+ CGM.getContext().getTargetAddressSpace(
+ LangAS::opencl_constant));
+ return SamplerTy;
+}
+
+llvm::Value *CGOpenCLRuntime::getPipeElemSize(const Expr *PipeArg) {
+ const PipeType *PipeTy = PipeArg->getType()->getAs<PipeType>();
+ // The type of the last (implicit) argument to be passed.
+ llvm::Type *Int32Ty = llvm::IntegerType::getInt32Ty(CGM.getLLVMContext());
+ unsigned TypeSize = CGM.getContext()
+ .getTypeSizeInChars(PipeTy->getElementType())
+ .getQuantity();
+ return llvm::ConstantInt::get(Int32Ty, TypeSize, false);
+}
+
+llvm::Value *CGOpenCLRuntime::getPipeElemAlign(const Expr *PipeArg) {
+ const PipeType *PipeTy = PipeArg->getType()->getAs<PipeType>();
+ // The type of the last (implicit) argument to be passed.
+ llvm::Type *Int32Ty = llvm::IntegerType::getInt32Ty(CGM.getLLVMContext());
+ unsigned TypeAlign = CGM.getContext()
+ .getTypeAlignInChars(PipeTy->getElementType())
+ .getQuantity();
+ return llvm::ConstantInt::get(Int32Ty, TypeAlign, false);
+}
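
A sketch of how these helpers would be used from a builtin-call emitter; the caller shape is an assumption, only the helper names come from this patch:

    // 'Arg' is assumed to be the pipe argument of a read_pipe/write_pipe call.
    llvm::Value *ElemSize  = CGM.getOpenCLRuntime().getPipeElemSize(Arg);
    llvm::Value *ElemAlign = CGM.getOpenCLRuntime().getPipeElemAlign(Arg);
    // Both are i32 constants appended as the implicit trailing arguments.
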
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index f1a7a3106443..ee3cb3dda063 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -33,9 +33,11 @@ class CGOpenCLRuntime {
protected:
CodeGenModule &CGM;
llvm::Type *PipeTy;
+ llvm::PointerType *SamplerTy;
public:
- CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM), PipeTy(nullptr) {}
+ CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM), PipeTy(nullptr),
+ SamplerTy(nullptr) {}
virtual ~CGOpenCLRuntime();
/// Emit the IR required for a work-group-local variable declaration, and add
@@ -47,6 +49,16 @@ public:
virtual llvm::Type *convertOpenCLSpecificType(const Type *T);
virtual llvm::Type *getPipeType();
+
+ llvm::PointerType *getSamplerType();
+
+ // \brief Returns a value indicating the size in bytes of the pipe
+ // element.
+ virtual llvm::Value *getPipeElemSize(const Expr *PipeArg);
+
+ // \brief Returns a value indicating the alignment in bytes of the pipe
+ // element.
+ virtual llvm::Value *getPipeElemAlign(const Expr *PipeArg);
};
}
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 6a0edbe0e7a9..0624d86b564a 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -15,10 +15,11 @@
#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
+#include "ConstantBuilder.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
@@ -489,7 +490,7 @@ enum OpenMPSchedType {
OMP_sch_runtime = 37,
OMP_sch_auto = 38,
/// static with chunk adjustment (e.g., simd)
- OMP_sch_static_balanced_chunked = 45,
+ OMP_sch_static_balanced_chunked = 45,
/// \brief Lower bound for 'ordered' versions.
OMP_ord_lower = 64,
OMP_ord_static_chunked = 65,
@@ -756,6 +757,7 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
FnTy, llvm::GlobalValue::InternalLinkage,
IsCombiner ? ".omp_combiner." : ".omp_initializer.", &CGM.getModule());
CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
+ Fn->removeFnAttr(llvm::Attribute::NoInline);
Fn->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
// Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
@@ -906,18 +908,19 @@ Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
DefaultOpenMPPSource =
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
- auto DefaultOpenMPLocation = new llvm::GlobalVariable(
- CGM.getModule(), IdentTy, /*isConstant*/ true,
- llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
+
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct(IdentTy);
+ fields.addInt(CGM.Int32Ty, 0);
+ fields.addInt(CGM.Int32Ty, Flags);
+ fields.addInt(CGM.Int32Ty, 0);
+ fields.addInt(CGM.Int32Ty, 0);
+ fields.add(DefaultOpenMPPSource);
+ auto DefaultOpenMPLocation =
+ fields.finishAndCreateGlobal("", Align, /*isConstant*/ true,
+ llvm::GlobalValue::PrivateLinkage);
DefaultOpenMPLocation->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- DefaultOpenMPLocation->setAlignment(Align.getQuantity());
-
- llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
- llvm::Constant *Values[] = {Zero,
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- Zero, Zero, DefaultOpenMPPSource};
- llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
- DefaultOpenMPLocation->setInitializer(Init);
+
OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
}
return Address(Entry, Align);
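
The five fields added above follow the ident_t location record consumed by the OpenMP runtime; as a sketch (field names are illustrative, not from the patch):

    struct ident_t {
      int32_t reserved_1;  // fields.addInt(CGM.Int32Ty, 0)
      int32_t flags;       // fields.addInt(CGM.Int32Ty, Flags)
      int32_t reserved_2;  // fields.addInt(CGM.Int32Ty, 0)
      int32_t reserved_3;  // fields.addInt(CGM.Int32Ty, 0)
      const char *psource; // fields.add(DefaultOpenMPPSource)
    };
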
@@ -2767,7 +2770,6 @@ createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
Args.push_back(&DummyPtr);
CodeGenFunction CGF(CGM);
- GlobalDecl();
auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
auto FTy = CGM.getTypes().GetFunctionType(FI);
auto *Fn =
@@ -2810,9 +2812,10 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
".omp_offloading.entries_end");
// Create all device images
- llvm::SmallVector<llvm::Constant *, 4> DeviceImagesEntires;
auto *DeviceImageTy = cast<llvm::StructType>(
CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
+ ConstantInitBuilder DeviceImagesBuilder(CGM);
+ auto DeviceImagesEntries = DeviceImagesBuilder.beginArray(DeviceImageTy);
for (unsigned i = 0; i < Devices.size(); ++i) {
StringRef T = Devices[i].getTriple();
@@ -2824,22 +2827,19 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
/*Initializer=*/nullptr, Twine(".omp_offloading.img_end.") + Twine(T));
- llvm::Constant *Dev =
- llvm::ConstantStruct::get(DeviceImageTy, ImgBegin, ImgEnd,
- HostEntriesBegin, HostEntriesEnd, nullptr);
- DeviceImagesEntires.push_back(Dev);
+ auto Dev = DeviceImagesEntries.beginStruct(DeviceImageTy);
+ Dev.add(ImgBegin);
+ Dev.add(ImgEnd);
+ Dev.add(HostEntriesBegin);
+ Dev.add(HostEntriesEnd);
+ Dev.finishAndAddTo(DeviceImagesEntries);
}
// Create device images global array.
- llvm::ArrayType *DeviceImagesInitTy =
- llvm::ArrayType::get(DeviceImageTy, DeviceImagesEntires.size());
- llvm::Constant *DeviceImagesInit =
- llvm::ConstantArray::get(DeviceImagesInitTy, DeviceImagesEntires);
-
- llvm::GlobalVariable *DeviceImages = new llvm::GlobalVariable(
- M, DeviceImagesInitTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, DeviceImagesInit,
- ".omp_offloading.device_images");
+ llvm::GlobalVariable *DeviceImages =
+ DeviceImagesEntries.finishAndCreateGlobal(".omp_offloading.device_images",
+ CGM.getPointerAlign(),
+ /*isConstant=*/true);
DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// This is a Zero array to be used in the creation of the constant expressions
@@ -2849,16 +2849,18 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
// Create the target region descriptor.
auto *BinaryDescriptorTy = cast<llvm::StructType>(
CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
- llvm::Constant *TargetRegionsDescriptorInit = llvm::ConstantStruct::get(
- BinaryDescriptorTy, llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
- llvm::ConstantExpr::getGetElementPtr(DeviceImagesInitTy, DeviceImages,
- Index),
- HostEntriesBegin, HostEntriesEnd, nullptr);
-
- auto *Desc = new llvm::GlobalVariable(
- M, BinaryDescriptorTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, TargetRegionsDescriptorInit,
- ".omp_offloading.descriptor");
+ ConstantInitBuilder DescBuilder(CGM);
+ auto DescInit = DescBuilder.beginStruct(BinaryDescriptorTy);
+ DescInit.addInt(CGM.Int32Ty, Devices.size());
+ DescInit.add(llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
+ DeviceImages,
+ Index));
+ DescInit.add(HostEntriesBegin);
+ DescInit.add(HostEntriesEnd);
+
+ auto *Desc = DescInit.finishAndCreateGlobal(".omp_offloading.descriptor",
+ CGM.getPointerAlign(),
+ /*isConstant=*/true);
// Emit code to register or unregister the descriptor at execution
// startup or closing, respectively.
@@ -2906,25 +2908,30 @@ void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *ID,
Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
+ // We can't have any padding between symbols, so we need to have 1-byte
+ // alignment.
+ auto Align = CharUnits::fromQuantity(1);
+
// Create the entry struct.
- llvm::Constant *EntryInit = llvm::ConstantStruct::get(
- TgtOffloadEntryType, AddrPtr, StrPtr,
- llvm::ConstantInt::get(CGM.SizeTy, Size), nullptr);
- llvm::GlobalVariable *Entry = new llvm::GlobalVariable(
- M, TgtOffloadEntryType, true, llvm::GlobalValue::ExternalLinkage,
- EntryInit, ".omp_offloading.entry");
+ ConstantInitBuilder EntryBuilder(CGM);
+ auto EntryInit = EntryBuilder.beginStruct(TgtOffloadEntryType);
+ EntryInit.add(AddrPtr);
+ EntryInit.add(StrPtr);
+ EntryInit.addInt(CGM.SizeTy, Size);
+ llvm::GlobalVariable *Entry =
+ EntryInit.finishAndCreateGlobal(".omp_offloading.entry",
+ Align,
+ /*constant*/ true,
+ llvm::GlobalValue::ExternalLinkage);
// The entry has to be created in the section the linker expects it to be.
Entry->setSection(".omp_offloading.entries");
- // We can't have any padding between symbols, so we need to have 1-byte
- // alignment.
- Entry->setAlignment(1);
}
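
The record built above mirrors the offload entry layout that the offloading runtime scans in the .omp_offloading.entries section; as a sketch (the struct name is illustrative):

    struct __tgt_offload_entry {
      void  *addr;  // EntryInit.add(AddrPtr)
      char  *name;  // EntryInit.add(StrPtr)
      size_t size;  // EntryInit.addInt(CGM.SizeTy, Size)
    };
    // The 1-byte alignment keeps consecutive entries packed, so the
    // section can be walked as a contiguous array.
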
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
// Emit the offloading entries and metadata so that the device codegen side
- // can
- // easily figure out what to emit. The produced metadata looks like this:
+ // can easily figure out what to emit. The produced metadata looks like
+ // this:
//
// !omp_offload.info = !{!1, ...}
//
@@ -3012,7 +3019,8 @@ void CGOpenMPRuntime::loadOffloadInfoMetadata() {
return;
llvm::LLVMContext C;
- auto ME = llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C);
+ auto ME = expectedToErrorOrAndEmitErrors(
+ C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
if (ME.getError())
return;
@@ -3465,6 +3473,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
".omp_task_privates_map.", &CGM.getModule());
CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
TaskPrivatesMapFnInfo);
+ TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
@@ -4436,9 +4445,8 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
auto *ThreadId = getThreadID(CGF, Loc);
auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- auto *RL =
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList.getPointer(),
- CGF.VoidPtrTy);
+ auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ReductionList.getPointer(), CGF.VoidPtrTy);
llvm::Value *Args[] = {
IdentTLoc, // ident_t *<loc>
ThreadId, // i32 <gtid>
@@ -4981,6 +4989,9 @@ public:
/// map/privatization results in multiple arguments passed to the runtime
/// library.
OMP_MAP_FIRST_REF = 0x20,
+ /// \brief Signal that the runtime library has to return the device pointer
+ /// in the current position for the data being mapped.
+ OMP_MAP_RETURN_PTR = 0x40,
/// \brief This flag signals that the reference being passed is a pointer to
/// private data.
OMP_MAP_PRIVATE_PTR = 0x80,
@@ -4988,12 +4999,30 @@ public:
OMP_MAP_PRIVATE_VAL = 0x100,
};
+ /// Class that associates information with a base pointer to be passed to the
+ /// runtime library.
+ class BasePointerInfo {
+ /// The base pointer.
+ llvm::Value *Ptr = nullptr;
+ /// The base declaration that refers to this device pointer, or null if
+ /// there is none.
+ const ValueDecl *DevPtrDecl = nullptr;
+
+ public:
+ BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
+ : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
+ llvm::Value *operator*() const { return Ptr; }
+ const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
+ void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
+ };
+
+ typedef SmallVector<BasePointerInfo, 16> MapBaseValuesArrayTy;
typedef SmallVector<llvm::Value *, 16> MapValuesArrayTy;
typedef SmallVector<unsigned, 16> MapFlagsArrayTy;
private:
/// \brief Directive from where the map clauses were extracted.
- const OMPExecutableDirective &Directive;
+ const OMPExecutableDirective &CurDir;
/// \brief Function the directive is being generated for.
CodeGenFunction &CGF;
@@ -5001,6 +5030,13 @@ private:
/// \brief Set of all first private variables in the current directive.
llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
+ /// Map between device pointer declarations and their expression components.
+ /// The key value for declarations in 'this' is null.
+ llvm::DenseMap<
+ const ValueDecl *,
+ SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
+ DevPointersMap;
+
llvm::Value *getExprTypeSize(const Expr *E) const {
auto ExprTy = E->getType().getCanonicalType();
@@ -5129,7 +5165,7 @@ private:
void generateInfoForComponentList(
OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- MapValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
+ MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
bool IsFirstComponentList) const {
@@ -5271,15 +5307,13 @@ private:
// If the variable is a pointer and is being dereferenced (i.e. is not
// the last component), the base has to be the pointer itself, not its
- // reference.
- if (I->getAssociatedDeclaration()->getType()->isAnyPointerType() &&
- std::next(I) != CE) {
- auto PtrAddr = CGF.MakeNaturalAlignAddrLValue(
- BP, I->getAssociatedDeclaration()->getType());
+ // reference. References are ignored for mapping purposes.
+ QualType Ty =
+ I->getAssociatedDeclaration()->getType().getNonReferenceType();
+ if (Ty->isAnyPointerType() && std::next(I) != CE) {
+ auto PtrAddr = CGF.MakeNaturalAlignAddrLValue(BP, Ty);
BP = CGF.EmitLoadOfPointerLValue(PtrAddr.getAddress(),
- I->getAssociatedDeclaration()
- ->getType()
- ->getAs<PointerType>())
+ Ty->castAs<PointerType>())
.getPointer();
// We do not need to generate individual map information for the
@@ -5322,14 +5356,34 @@ private:
isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
"Unexpected expression");
- // Save the base we are currently using.
- BasePointers.push_back(BP);
-
auto *LB = CGF.EmitLValue(I->getAssociatedExpression()).getPointer();
auto *Size = getExprTypeSize(I->getAssociatedExpression());
+ // If we have a member expression and the current component is a
+ // reference, we have to map the reference too. Whenever we have a
+ // reference, the section the reference refers to is going to be a
+ // load instruction from the storage assigned to the reference.
+ if (isa<MemberExpr>(I->getAssociatedExpression()) &&
+ I->getAssociatedDeclaration()->getType()->isReferenceType()) {
+ auto *LI = cast<llvm::LoadInst>(LB);
+ auto *RefAddr = LI->getPointerOperand();
+
+ BasePointers.push_back(BP);
+ Pointers.push_back(RefAddr);
+ Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Types.push_back(getMapTypeBits(
+ /*MapType*/ OMPC_MAP_alloc, /*MapTypeModifier=*/OMPC_MAP_unknown,
+ !IsExpressionFirstInfo, IsCaptureFirstInfo));
+ IsExpressionFirstInfo = false;
+ IsCaptureFirstInfo = false;
+ // The reference will be the next base address.
+ BP = RefAddr;
+ }
+
+ BasePointers.push_back(BP);
Pointers.push_back(LB);
Sizes.push_back(Size);
+
// We need to add a pointer flag for each map that comes from the
// same expression except for the first one. We also need to signal
// this map is the first one that relates with the current capture
@@ -5373,17 +5427,23 @@ private:
public:
MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
- : Directive(Dir), CGF(CGF) {
+ : CurDir(Dir), CGF(CGF) {
// Extract firstprivate clause information.
for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
for (const auto *D : C->varlists())
FirstPrivateDecls.insert(
cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
+ // Extract device pointer clause information.
+ for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
+ for (auto L : C->component_lists())
+ DevPointersMap[L.first].push_back(L.second);
}
/// \brief Generate all the base pointers, section pointers, sizes and map
- /// types for the extracted mappable expressions.
- void generateAllInfo(MapValuesArrayTy &BasePointers,
+ /// types for the extracted mappable expressions. Also, for each item that
+ /// relates with a device pointer, a pair of the relevant declaration and
+ /// index where it occurs is appended to the device pointers info array.
+ void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
MapFlagsArrayTy &Types) const {
BasePointers.clear();
@@ -5392,9 +5452,32 @@ public:
Types.clear();
struct MapInfo {
+ /// Kind that defines how a device pointer has to be returned.
+ enum ReturnPointerKind {
+ // Don't have to return any pointer.
+ RPK_None,
+ // Pointer is the base of the declaration.
+ RPK_Base,
+ // Pointer is a member of the base declaration - 'this'
+ RPK_Member,
+ // Pointer is a reference and a member of the base declaration - 'this'
+ RPK_MemberReference,
+ };
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType;
OpenMPMapClauseKind MapTypeModifier;
+ ReturnPointerKind ReturnDevicePointer;
+
+ MapInfo()
+ : MapType(OMPC_MAP_unknown), MapTypeModifier(OMPC_MAP_unknown),
+ ReturnDevicePointer(RPK_None) {}
+ MapInfo(
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
+ OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
+ ReturnPointerKind ReturnDevicePointer)
+ : Components(Components), MapType(MapType),
+ MapTypeModifier(MapTypeModifier),
+ ReturnDevicePointer(ReturnDevicePointer) {}
};
// We have to process the component lists that relate with the same
@@ -5404,24 +5487,77 @@ public:
// Helper function to fill the information map for the different supported
// clauses.
- auto &&InfoGen =
- [&Info](const ValueDecl *D,
- OMPClauseMappableExprCommon::MappableExprComponentListRef L,
- OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier) {
- const ValueDecl *VD =
- D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- Info[VD].push_back({L, MapType, MapModifier});
- };
+ auto &&InfoGen = [&Info](
+ const ValueDecl *D,
+ OMPClauseMappableExprCommon::MappableExprComponentListRef L,
+ OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
+ MapInfo::ReturnPointerKind ReturnDevicePointer) {
+ const ValueDecl *VD =
+ D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
+ Info[VD].push_back({L, MapType, MapModifier, ReturnDevicePointer});
+ };
- for (auto *C : Directive.getClausesOfKind<OMPMapClause>())
+ // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
+ for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
for (auto L : C->component_lists())
- InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier());
- for (auto *C : Directive.getClausesOfKind<OMPToClause>())
+ InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
+ MapInfo::RPK_None);
+ for (auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
for (auto L : C->component_lists())
- InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown);
- for (auto *C : Directive.getClausesOfKind<OMPFromClause>())
+ InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
+ MapInfo::RPK_None);
+ for (auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
for (auto L : C->component_lists())
- InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown);
+ InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
+ MapInfo::RPK_None);
+
+ // Look at the use_device_ptr clause information and mark the existing map
+ // entries as such. If there is no map information for an entry in the
+ // use_device_ptr list, we create one with map type 'alloc' and zero size
+ // section. It is the user's fault if that was not mapped before.
+ // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
+ for (auto *C : this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>())
+ for (auto L : C->component_lists()) {
+ assert(!L.second.empty() && "Not expecting empty list of components!");
+ const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ auto *IE = L.second.back().getAssociatedExpression();
+ // If the first component is a member expression, we have to look into
+ // 'this', which is keyed by null in the map information. Otherwise
+ // look directly for the information.
+ auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
+
+ // We potentially have map information for this declaration already.
+ // Look for the first set of components that refer to it.
+ if (It != Info.end()) {
+ auto CI = std::find_if(
+ It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
+ // If we found a map entry, signal that the pointer has to be returned
+ // and move on to the next declaration.
+ if (CI != It->second.end()) {
+ CI->ReturnDevicePointer = isa<MemberExpr>(IE)
+ ? (VD->getType()->isReferenceType()
+ ? MapInfo::RPK_MemberReference
+ : MapInfo::RPK_Member)
+ : MapInfo::RPK_Base;
+ continue;
+ }
+ }
+
+ // We didn't find any match in our map information - generate a zero
+ // size array section.
+ // FIXME: MSVC 2013 seems to require this-> to find member CGF.
+ llvm::Value *Ptr =
+ this->CGF
+ .EmitLoadOfLValue(this->CGF.EmitLValue(IE), SourceLocation())
+ .getScalarVal();
+ BasePointers.push_back({Ptr, VD});
+ Pointers.push_back(Ptr);
+ Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
+ Types.push_back(OMP_MAP_RETURN_PTR | OMP_MAP_FIRST_REF);
+ }
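
A source-level sketch of the two cases this loop distinguishes (hypothetical user code):

    int *p = ...;
    #pragma omp target data map(to: p[0:n]) use_device_ptr(p)
    { use(p); }  // existing map entry is tagged to return the device pointer

    #pragma omp target data use_device_ptr(q)  // q not mapped here
    { use(q); }  // a zero-size 'alloc' entry with OMP_MAP_RETURN_PTR is made
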
for (auto &M : Info) {
// We need to know when we generate information for the first component
@@ -5430,9 +5566,36 @@ public:
for (MapInfo &L : M.second) {
assert(!L.Components.empty() &&
"Not expecting declaration with no component lists.");
- generateInfoForComponentList(L.MapType, L.MapTypeModifier, L.Components,
- BasePointers, Pointers, Sizes, Types,
- IsFirstComponentList);
+
+ // Remember the current base pointer index.
+ unsigned CurrentBasePointersIdx = BasePointers.size();
+ // FIXME: MSVC 2013 seems to require this-> to find the member method.
+ this->generateInfoForComponentList(L.MapType, L.MapTypeModifier,
+ L.Components, BasePointers, Pointers,
+ Sizes, Types, IsFirstComponentList);
+
+ // If this entry relates with a device pointer, set the relevant
+ // declaration and add the 'return pointer' flag.
+ if (IsFirstComponentList &&
+ L.ReturnDevicePointer != MapInfo::RPK_None) {
+ // If the pointer is not the base of the map, we need to skip the
+ // base. If it is a reference in a member field, we also need to skip
+ // the map of the reference.
+ if (L.ReturnDevicePointer != MapInfo::RPK_Base) {
+ ++CurrentBasePointersIdx;
+ if (L.ReturnDevicePointer == MapInfo::RPK_MemberReference)
+ ++CurrentBasePointersIdx;
+ }
+ assert(BasePointers.size() > CurrentBasePointersIdx &&
+ "Unexpected number of mapped base pointers.");
+
+ auto *RelevantVD = L.Components.back().getAssociatedDeclaration();
+ assert(RelevantVD &&
+ "No relevant declaration related with device pointer??");
+
+ BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
+ Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PTR;
+ }
IsFirstComponentList = false;
}
}
@@ -5441,7 +5604,8 @@ public:
/// \brief Generate the base pointers, section pointers, sizes and map types
/// associated to a given capture.
void generateInfoForCapture(const CapturedStmt::Capture *Cap,
- MapValuesArrayTy &BasePointers,
+ llvm::Value *Arg,
+ MapBaseValuesArrayTy &BasePointers,
MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes,
MapFlagsArrayTy &Types) const {
@@ -5453,15 +5617,40 @@ public:
Sizes.clear();
Types.clear();
+ // We need to know when we are generating information for the first component
+ // associated with a capture, because the mapping flags depend on it.
+ bool IsFirstComponentList = true;
+
const ValueDecl *VD =
Cap->capturesThis()
? nullptr
: cast<ValueDecl>(Cap->getCapturedVar()->getCanonicalDecl());
- // We need to know when we generating information for the first component
- // associated with a capture, because the mapping flags depend on it.
- bool IsFirstComponentList = true;
- for (auto *C : Directive.getClausesOfKind<OMPMapClause>())
+ // If this declaration appears in an is_device_ptr clause we just have to
+ // pass the pointer by value. If it is a reference to a declaration, we just
+ // pass its value, otherwise, if it is a member expression, we need to map
+ // 'to' the field.
+ if (!VD) {
+ auto It = DevPointersMap.find(VD);
+ if (It != DevPointersMap.end()) {
+ for (auto L : It->second) {
+ generateInfoForComponentList(
+ /*MapType=*/OMPC_MAP_to, /*MapTypeModifier=*/OMPC_MAP_unknown, L,
+ BasePointers, Pointers, Sizes, Types, IsFirstComponentList);
+ IsFirstComponentList = false;
+ }
+ return;
+ }
+ } else if (DevPointersMap.count(VD)) {
+ BasePointers.push_back({Arg, VD});
+ Pointers.push_back(Arg);
+ Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Types.push_back(OMP_MAP_PRIVATE_VAL | OMP_MAP_FIRST_REF);
+ return;
+ }
+
+ // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
+ for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
for (auto L : C->decl_component_lists(VD)) {
assert(L.first == VD &&
"We got information for the wrong declaration??");
@@ -5478,12 +5667,12 @@ public:
/// \brief Generate the default map information for a given capture \a CI,
/// record field declaration \a RI and captured value \a CV.
- void generateDefaultMapInfo(
- const CapturedStmt::Capture &CI, const FieldDecl &RI, llvm::Value *CV,
- MappableExprsHandler::MapValuesArrayTy &CurBasePointers,
- MappableExprsHandler::MapValuesArrayTy &CurPointers,
- MappableExprsHandler::MapValuesArrayTy &CurSizes,
- MappableExprsHandler::MapFlagsArrayTy &CurMapTypes) {
+ void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
+ const FieldDecl &RI, llvm::Value *CV,
+ MapBaseValuesArrayTy &CurBasePointers,
+ MapValuesArrayTy &CurPointers,
+ MapValuesArrayTy &CurSizes,
+ MapFlagsArrayTy &CurMapTypes) {
// Do the default mapping.
if (CI.capturesThis()) {
@@ -5492,15 +5681,14 @@ public:
const PointerType *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
// Default map type.
- CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM);
+ CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
} else if (CI.capturesVariableByCopy()) {
CurBasePointers.push_back(CV);
CurPointers.push_back(CV);
if (!RI.getType()->isAnyPointerType()) {
// We have to signal to the runtime captures passed by value that are
// not pointers.
- CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_PRIVATE_VAL);
+ CurMapTypes.push_back(OMP_MAP_PRIVATE_VAL);
CurSizes.push_back(CGF.getTypeSize(RI.getType()));
} else {
// Pointers are implicitly mapped with a zero size and no flags
@@ -5521,9 +5709,8 @@ public:
// default the value doesn't have to be retrieved. For an aggregate
// type, the default is 'tofrom'.
CurMapTypes.push_back(ElementType->isAggregateType()
- ? (MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM)
- : MappableExprsHandler::OMP_MAP_TO);
+ ? (OMP_MAP_TO | OMP_MAP_FROM)
+ : OMP_MAP_TO);
// If we have a capture by reference we may need to add the private
// pointer flag if the base declaration shows in some first-private
@@ -5533,7 +5720,7 @@ public:
}
// Every default map produces a single argument, so, it is always the
// first one.
- CurMapTypes.back() |= MappableExprsHandler::OMP_MAP_FIRST_REF;
+ CurMapTypes.back() |= OMP_MAP_FIRST_REF;
}
};
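
For reference, the defaults generateDefaultMapInfo produces, as read from the code above (sketch):

    // 'this' capture                      -> OMP_MAP_TO | OMP_MAP_FROM
    // by-copy capture, non-pointer        -> OMP_MAP_PRIVATE_VAL
    // by-copy capture, pointer            -> zero size, implicit mapping
    // by-reference capture, aggregate     -> OMP_MAP_TO | OMP_MAP_FROM
    // by-reference capture, non-aggregate -> OMP_MAP_TO
    // every default entry also gets OMP_MAP_FIRST_REF
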
@@ -5548,19 +5735,20 @@ enum OpenMPOffloadingReservedDeviceIDs {
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
static void
-emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
- llvm::Value *&PointersArray, llvm::Value *&SizesArray,
- llvm::Value *&MapTypesArray,
- MappableExprsHandler::MapValuesArrayTy &BasePointers,
+emitOffloadingArrays(CodeGenFunction &CGF,
+ MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
MappableExprsHandler::MapValuesArrayTy &Pointers,
MappableExprsHandler::MapValuesArrayTy &Sizes,
- MappableExprsHandler::MapFlagsArrayTy &MapTypes) {
+ MappableExprsHandler::MapFlagsArrayTy &MapTypes,
+ CGOpenMPRuntime::TargetDataInfo &Info) {
auto &CGM = CGF.CGM;
auto &Ctx = CGF.getContext();
- BasePointersArray = PointersArray = SizesArray = MapTypesArray = nullptr;
+ // Reset the array information.
+ Info.clearArrayInfo();
+ Info.NumberOfPtrs = BasePointers.size();
- if (unsigned PointerNumVal = BasePointers.size()) {
+ if (Info.NumberOfPtrs) {
// Detect if we have any capture size requiring runtime evaluation of the
// size so that a constant array could be eventually used.
bool hasRuntimeEvaluationCaptureSize = false;
@@ -5570,14 +5758,14 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
break;
}
- llvm::APInt PointerNumAP(32, PointerNumVal, /*isSigned=*/true);
+ llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
QualType PointerArrayType =
Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- BasePointersArray =
+ Info.BasePointersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
- PointersArray =
+ Info.PointersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
// If we don't have any VLA types or other types that require runtime
@@ -5587,7 +5775,7 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
QualType SizeArrayType = Ctx.getConstantArrayType(
Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- SizesArray =
+ Info.SizesArray =
CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
} else {
// We expect all the sizes to be constant, so we collect them to create
@@ -5603,7 +5791,7 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
/*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
SizesArrayInit, ".offload_sizes");
SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- SizesArray = SizesArrayGbl;
+ Info.SizesArray = SizesArrayGbl;
}
// The map types are always constant so we don't need to generate code to
@@ -5615,10 +5803,10 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
/*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
MapTypesArrayInit, ".offload_maptypes");
MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- MapTypesArray = MapTypesArrayGbl;
+ Info.MapTypesArray = MapTypesArrayGbl;
- for (unsigned i = 0; i < PointerNumVal; ++i) {
- llvm::Value *BPVal = BasePointers[i];
+ for (unsigned i = 0; i < Info.NumberOfPtrs; ++i) {
+ llvm::Value *BPVal = *BasePointers[i];
if (BPVal->getType()->isPointerTy())
BPVal = CGF.Builder.CreateBitCast(BPVal, CGM.VoidPtrTy);
else {
@@ -5627,11 +5815,15 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
BPVal = CGF.Builder.CreateIntToPtr(BPVal, CGM.VoidPtrTy);
}
llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), BasePointersArray,
- 0, i);
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.BasePointersArray, 0, i);
Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(BPVal, BPAddr);
+ if (Info.requiresDevicePointerInfo())
+ if (auto *DevVD = BasePointers[i].getDevicePtrDecl())
+ Info.CaptureDeviceAddrMap.insert(std::make_pair(DevVD, BPAddr));
+
llvm::Value *PVal = Pointers[i];
if (PVal->getType()->isPointerTy())
PVal = CGF.Builder.CreateBitCast(PVal, CGM.VoidPtrTy);
@@ -5641,14 +5833,15 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
PVal = CGF.Builder.CreateIntToPtr(PVal, CGM.VoidPtrTy);
}
llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), PointersArray, 0,
- i);
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.PointersArray, 0, i);
Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(PVal, PAddr);
if (hasRuntimeEvaluationCaptureSize) {
llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.SizeTy, PointerNumVal), SizesArray,
+ llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
+ Info.SizesArray,
/*Idx0=*/0,
/*Idx1=*/i);
Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
@@ -5664,23 +5857,24 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
static void emitOffloadingArraysArgument(
CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
- llvm::Value *&MapTypesArrayArg, llvm::Value *BasePointersArray,
- llvm::Value *PointersArray, llvm::Value *SizesArray,
- llvm::Value *MapTypesArray, unsigned NumElems) {
+ llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
auto &CGM = CGF.CGM;
- if (NumElems) {
+ if (Info.NumberOfPtrs) {
BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, NumElems), BasePointersArray,
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.BasePointersArray,
/*Idx0=*/0, /*Idx1=*/0);
PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, NumElems), PointersArray,
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.PointersArray,
/*Idx0=*/0,
/*Idx1=*/0);
SizesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.SizeTy, NumElems), SizesArray,
+ llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs), Info.SizesArray,
/*Idx0=*/0, /*Idx1=*/0);
MapTypesArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.Int32Ty, NumElems), MapTypesArray,
+ llvm::ArrayType::get(CGM.Int32Ty, Info.NumberOfPtrs),
+ Info.MapTypesArray,
/*Idx0=*/0,
/*Idx1=*/0);
} else {
@@ -5707,12 +5901,12 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// Fill up the arrays with all the captured variables.
MappableExprsHandler::MapValuesArrayTy KernelArgs;
- MappableExprsHandler::MapValuesArrayTy BasePointers;
+ MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
MappableExprsHandler::MapValuesArrayTy Pointers;
MappableExprsHandler::MapValuesArrayTy Sizes;
MappableExprsHandler::MapFlagsArrayTy MapTypes;
- MappableExprsHandler::MapValuesArrayTy CurBasePointers;
+ MappableExprsHandler::MapBaseValuesArrayTy CurBasePointers;
MappableExprsHandler::MapValuesArrayTy CurPointers;
MappableExprsHandler::MapValuesArrayTy CurSizes;
MappableExprsHandler::MapFlagsArrayTy CurMapTypes;
@@ -5746,7 +5940,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
} else {
// If we have any information in the map clause, we use it, otherwise we
// just do a default mapping.
- MEHandler.generateInfoForCapture(CI, CurBasePointers, CurPointers,
+ MEHandler.generateInfoForCapture(CI, *CV, CurBasePointers, CurPointers,
CurSizes, CurMapTypes);
if (CurBasePointers.empty())
MEHandler.generateDefaultMapInfo(*CI, **RI, *CV, CurBasePointers,
@@ -5761,7 +5955,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
// The kernel args are always the first elements of the base pointers
// associated with a capture.
- KernelArgs.push_back(CurBasePointers.front());
+ KernelArgs.push_back(*CurBasePointers.front());
// We need to append the results of this capture to what we already have.
BasePointers.append(CurBasePointers.begin(), CurBasePointers.end());
Pointers.append(CurPointers.begin(), CurPointers.end());
@@ -5784,17 +5978,11 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
&D](CodeGenFunction &CGF, PrePostActionTy &) {
auto &RT = CGF.CGM.getOpenMPRuntime();
// Emit the offloading arrays.
- llvm::Value *BasePointersArray;
- llvm::Value *PointersArray;
- llvm::Value *SizesArray;
- llvm::Value *MapTypesArray;
- emitOffloadingArrays(CGF, BasePointersArray, PointersArray, SizesArray,
- MapTypesArray, BasePointers, Pointers, Sizes,
- MapTypes);
- emitOffloadingArraysArgument(CGF, BasePointersArray, PointersArray,
- SizesArray, MapTypesArray, BasePointersArray,
- PointersArray, SizesArray, MapTypesArray,
- BasePointers.size());
+ TargetDataInfo Info;
+ emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
+ emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
+ Info.PointersArray, Info.SizesArray,
+ Info.MapTypesArray, Info);
// On top of the arrays that were filled up, the target offloading call
// takes as arguments the device id as well as the host pointer. The host
@@ -5835,15 +6023,19 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
assert(ThreadLimit && "Thread limit expression should be available along "
"with number of teams.");
llvm::Value *OffloadingArgs[] = {
- DeviceID, OutlinedFnID, PointerNum,
- BasePointersArray, PointersArray, SizesArray,
- MapTypesArray, NumTeams, ThreadLimit};
+ DeviceID, OutlinedFnID,
+ PointerNum, Info.BasePointersArray,
+ Info.PointersArray, Info.SizesArray,
+ Info.MapTypesArray, NumTeams,
+ ThreadLimit};
Return = CGF.EmitRuntimeCall(
RT.createRuntimeFunction(OMPRTL__tgt_target_teams), OffloadingArgs);
} else {
llvm::Value *OffloadingArgs[] = {
- DeviceID, OutlinedFnID, PointerNum, BasePointersArray,
- PointersArray, SizesArray, MapTypesArray};
+ DeviceID, OutlinedFnID,
+ PointerNum, Info.BasePointersArray,
+ Info.PointersArray, Info.SizesArray,
+ Info.MapTypesArray};
Return = CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__tgt_target),
OffloadingArgs);
}
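
The argument lists above line up with the tgt entry points; a sketch of the assumed prototypes (the real declarations live in the offloading runtime and may differ):

    // int32_t __tgt_target(int32_t device_id, void *host_ptr, int32_t arg_num,
    //                      void **args_base, void **args,
    //                      size_t *arg_sizes, int32_t *arg_types);
    // int32_t __tgt_target_teams(/* same args */, int32_t num_teams,
    //                            int32_t thread_limit);
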
@@ -5951,7 +6143,7 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
// Try to detect target regions in the function.
scanForTargetRegionsFunctions(FD.getBody(), CGM.getMangledName(GD));
- // We should not emit any function othen that the ones created during the
+ // We should not emit any function other than the ones created during the
// scanning. Therefore, we signal that this function is completely dealt
// with.
return true;
@@ -6055,29 +6247,23 @@ void CGOpenMPRuntime::emitNumTeamsClause(CodeGenFunction &CGF,
PushNumTeamsArgs);
}
-void CGOpenMPRuntime::emitTargetDataCalls(CodeGenFunction &CGF,
- const OMPExecutableDirective &D,
- const Expr *IfCond,
- const Expr *Device,
- const RegionCodeGenTy &CodeGen) {
-
+void CGOpenMPRuntime::emitTargetDataCalls(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond,
+ const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) {
if (!CGF.HaveInsertPoint())
return;
- llvm::Value *BasePointersArray = nullptr;
- llvm::Value *PointersArray = nullptr;
- llvm::Value *SizesArray = nullptr;
- llvm::Value *MapTypesArray = nullptr;
- unsigned NumOfPtrs = 0;
+ // Action used to replace the default codegen action and turn privatization
+ // off.
+ PrePostActionTy NoPrivAction;
// Generate the code for the opening of the data environment. Capture all the
// arguments of the runtime call by reference because they are used in the
// closing of the region.
- auto &&BeginThenGen = [&D, &CGF, &BasePointersArray, &PointersArray,
- &SizesArray, &MapTypesArray, Device,
- &NumOfPtrs](CodeGenFunction &CGF, PrePostActionTy &) {
+ auto &&BeginThenGen = [&D, &CGF, Device, &Info, &CodeGen, &NoPrivAction](
+ CodeGenFunction &CGF, PrePostActionTy &) {
// Fill up the arrays with all the mapped variables.
- MappableExprsHandler::MapValuesArrayTy BasePointers;
+ MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
MappableExprsHandler::MapValuesArrayTy Pointers;
MappableExprsHandler::MapValuesArrayTy Sizes;
MappableExprsHandler::MapFlagsArrayTy MapTypes;
@@ -6085,21 +6271,16 @@ void CGOpenMPRuntime::emitTargetDataCalls(CodeGenFunction &CGF,
// Get map clause information.
MappableExprsHandler MCHandler(D, CGF);
MCHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
- NumOfPtrs = BasePointers.size();
// Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, BasePointersArray, PointersArray, SizesArray,
- MapTypesArray, BasePointers, Pointers, Sizes,
- MapTypes);
+ emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
llvm::Value *BasePointersArrayArg = nullptr;
llvm::Value *PointersArrayArg = nullptr;
llvm::Value *SizesArrayArg = nullptr;
llvm::Value *MapTypesArrayArg = nullptr;
emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
- SizesArrayArg, MapTypesArrayArg,
- BasePointersArray, PointersArray, SizesArray,
- MapTypesArray, NumOfPtrs);
+ SizesArrayArg, MapTypesArrayArg, Info);
// Emit device ID if any.
llvm::Value *DeviceID = nullptr;
@@ -6110,7 +6291,7 @@ void CGOpenMPRuntime::emitTargetDataCalls(CodeGenFunction &CGF,
DeviceID = CGF.Builder.getInt32(OMP_DEVICEID_UNDEF);
// Emit the number of elements in the offloading arrays.
- auto *PointerNum = CGF.Builder.getInt32(NumOfPtrs);
+ auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
@@ -6118,23 +6299,24 @@ void CGOpenMPRuntime::emitTargetDataCalls(CodeGenFunction &CGF,
auto &RT = CGF.CGM.getOpenMPRuntime();
CGF.EmitRuntimeCall(RT.createRuntimeFunction(OMPRTL__tgt_target_data_begin),
OffloadingArgs);
+
+ // If device pointer privatization is required, emit the body of the region
+ // here. It will have to be duplicated: with and without privatization.
+ if (!Info.CaptureDeviceAddrMap.empty())
+ CodeGen(CGF);
};
// Generate code for the closing of the data region.
- auto &&EndThenGen = [&CGF, &BasePointersArray, &PointersArray, &SizesArray,
- &MapTypesArray, Device,
- &NumOfPtrs](CodeGenFunction &CGF, PrePostActionTy &) {
- assert(BasePointersArray && PointersArray && SizesArray && MapTypesArray &&
- NumOfPtrs && "Invalid data environment closing arguments.");
+ auto &&EndThenGen = [&CGF, Device, &Info](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ assert(Info.isValid() && "Invalid data environment closing arguments.");
llvm::Value *BasePointersArrayArg = nullptr;
llvm::Value *PointersArrayArg = nullptr;
llvm::Value *SizesArrayArg = nullptr;
llvm::Value *MapTypesArrayArg = nullptr;
emitOffloadingArraysArgument(CGF, BasePointersArrayArg, PointersArrayArg,
- SizesArrayArg, MapTypesArrayArg,
- BasePointersArray, PointersArray, SizesArray,
- MapTypesArray, NumOfPtrs);
+ SizesArrayArg, MapTypesArrayArg, Info);
// Emit device ID if any.
llvm::Value *DeviceID = nullptr;
@@ -6145,7 +6327,7 @@ void CGOpenMPRuntime::emitTargetDataCalls(CodeGenFunction &CGF,
DeviceID = CGF.Builder.getInt32(OMP_DEVICEID_UNDEF);
// Emit the number of elements in the offloading arrays.
- auto *PointerNum = CGF.Builder.getInt32(NumOfPtrs);
+ auto *PointerNum = CGF.Builder.getInt32(Info.NumberOfPtrs);
llvm::Value *OffloadingArgs[] = {
DeviceID, PointerNum, BasePointersArrayArg,
@@ -6155,24 +6337,40 @@ void CGOpenMPRuntime::emitTargetDataCalls(CodeGenFunction &CGF,
OffloadingArgs);
};
- // In the event we get an if clause, we don't have to take any action on the
- // else side.
- auto &&ElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
+ // If we need device pointer privatization, we need to emit the body of the
+ // region with no privatization in the 'else' branch of the conditional.
+ // Otherwise, we don't have to do anything.
+ auto &&BeginElseGen = [&Info, &CodeGen, &NoPrivAction](CodeGenFunction &CGF,
+ PrePostActionTy &) {
+ if (!Info.CaptureDeviceAddrMap.empty()) {
+ CodeGen.setAction(NoPrivAction);
+ CodeGen(CGF);
+ }
+ };
+
+ // We don't have to do anything to close the region if the if clause evaluates
+ // to false.
+ auto &&EndElseGen = [](CodeGenFunction &CGF, PrePostActionTy &) {};
if (IfCond) {
- emitOMPIfClause(CGF, IfCond, BeginThenGen, ElseGen);
+ emitOMPIfClause(CGF, IfCond, BeginThenGen, BeginElseGen);
} else {
- RegionCodeGenTy BeginThenRCG(BeginThenGen);
- BeginThenRCG(CGF);
+ RegionCodeGenTy RCG(BeginThenGen);
+ RCG(CGF);
}
- CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data, CodeGen);
+ // If we don't require privatization of device pointers, we emit the body in
+ // between the runtime calls. This avoids duplicating the body code.
+ if (Info.CaptureDeviceAddrMap.empty()) {
+ CodeGen.setAction(NoPrivAction);
+ CodeGen(CGF);
+ }
if (IfCond) {
- emitOMPIfClause(CGF, IfCond, EndThenGen, ElseGen);
+ emitOMPIfClause(CGF, IfCond, EndThenGen, EndElseGen);
} else {
- RegionCodeGenTy EndThenRCG(EndThenGen);
- EndThenRCG(CGF);
+ RegionCodeGenTy RCG(EndThenGen);
+ RCG(CGF);
}
}
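
The resulting codegen shape, sketched for the case where device pointer privatization is required:

    // if (if-cond) {
    //   __tgt_target_data_begin(...);
    //   <region body, device pointers privatized>   // BeginThenGen
    // } else {
    //   <region body, no privatization>             // BeginElseGen
    // }
    // if (if-cond)
    //   __tgt_target_data_end(...);                 // EndThenGen
    // Without privatization the body is emitted once, between the two calls.
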
@@ -6190,7 +6388,7 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
// Generate the code for the opening of the data environment.
auto &&ThenGen = [&D, &CGF, Device](CodeGenFunction &CGF, PrePostActionTy &) {
// Fill up the arrays with all the mapped variables.
- MappableExprsHandler::MapValuesArrayTy BasePointers;
+ MappableExprsHandler::MapBaseValuesArrayTy BasePointers;
MappableExprsHandler::MapValuesArrayTy Pointers;
MappableExprsHandler::MapValuesArrayTy Sizes;
MappableExprsHandler::MapFlagsArrayTy MapTypes;
@@ -6199,19 +6397,12 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
MappableExprsHandler MEHandler(D, CGF);
MEHandler.generateAllInfo(BasePointers, Pointers, Sizes, MapTypes);
- llvm::Value *BasePointersArrayArg = nullptr;
- llvm::Value *PointersArrayArg = nullptr;
- llvm::Value *SizesArrayArg = nullptr;
- llvm::Value *MapTypesArrayArg = nullptr;
-
// Fill up the arrays and create the arguments.
- emitOffloadingArrays(CGF, BasePointersArrayArg, PointersArrayArg,
- SizesArrayArg, MapTypesArrayArg, BasePointers,
- Pointers, Sizes, MapTypes);
- emitOffloadingArraysArgument(
- CGF, BasePointersArrayArg, PointersArrayArg, SizesArrayArg,
- MapTypesArrayArg, BasePointersArrayArg, PointersArrayArg, SizesArrayArg,
- MapTypesArrayArg, BasePointers.size());
+ TargetDataInfo Info;
+ emitOffloadingArrays(CGF, BasePointers, Pointers, Sizes, MapTypes, Info);
+ emitOffloadingArraysArgument(CGF, Info.BasePointersArray,
+ Info.PointersArray, Info.SizesArray,
+ Info.MapTypesArray, Info);
// Emit device ID if any.
llvm::Value *DeviceID = nullptr;
@@ -6225,8 +6416,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
auto *PointerNum = CGF.Builder.getInt32(BasePointers.size());
llvm::Value *OffloadingArgs[] = {
- DeviceID, PointerNum, BasePointersArrayArg,
- PointersArrayArg, SizesArrayArg, MapTypesArrayArg};
+ DeviceID, PointerNum, Info.BasePointersArray,
+ Info.PointersArray, Info.SizesArray, Info.MapTypesArray};
auto &RT = CGF.CGM.getOpenMPRuntime();
// Select the right runtime function call for each expected standalone
@@ -6326,7 +6517,7 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD,
static void
emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
- llvm::APSInt VLENVal,
+ const llvm::APSInt &VLENVal,
ArrayRef<ParamAttrTy> ParamAttrs,
OMPDeclareSimdDeclAttr::BranchStateTy State) {
struct ISADataTy {
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 270de8dd505e..9057e5ec4c14 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -997,17 +997,59 @@ public:
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
+ /// Struct that keeps all the relevant information that should be preserved
+ /// throughout a 'target data' region.
+ class TargetDataInfo {
+ /// Set to true if device pointer information has to be obtained.
+ bool RequiresDevicePointerInfo = false;
+
+ public:
+ /// The array of base pointer passed to the runtime library.
+ llvm::Value *BasePointersArray = nullptr;
+ /// The array of section pointers passed to the runtime library.
+ llvm::Value *PointersArray = nullptr;
+ /// The array of sizes passed to the runtime library.
+ llvm::Value *SizesArray = nullptr;
+ /// The array of map types passed to the runtime library.
+ llvm::Value *MapTypesArray = nullptr;
+ /// The total number of pointers passed to the runtime library.
+ unsigned NumberOfPtrs = 0u;
+ /// Map between a declaration of a capture and the corresponding base
+ /// pointer address where the runtime returns the device pointers.
+ llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
+
+ explicit TargetDataInfo() {}
+ explicit TargetDataInfo(bool RequiresDevicePointerInfo)
+ : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
+ /// Clear information about the data arrays.
+ void clearArrayInfo() {
+ BasePointersArray = nullptr;
+ PointersArray = nullptr;
+ SizesArray = nullptr;
+ MapTypesArray = nullptr;
+ NumberOfPtrs = 0u;
+ }
+ /// Return true if the current target data information has valid arrays.
+ bool isValid() {
+ return BasePointersArray && PointersArray && SizesArray &&
+ MapTypesArray && NumberOfPtrs;
+ }
+ bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
+ };
+
/// \brief Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
- /// \param IfCond Expression evaluated in if clause associated with the target
- /// directive, or null if no if clause is used.
+ /// \param IfCond Expression evaluated in if clause associated with the
+ /// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
- /// \param CodeGen Function that emits the enclosed region.
+ /// \param Info A record used to store information that needs to be preserved
+ /// until the region is closed.
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
- const RegionCodeGenTy &CodeGen);
+ const RegionCodeGenTy &CodeGen,
+ TargetDataInfo &Info);
/// \brief Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
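
To make the intended protocol concrete: the caller constructs one TargetDataInfo, the region-entry emission fills its arrays, and the matching region-exit call reuses the same record. Below is a minimal standalone sketch of that lifecycle, with a stand-in Value type instead of llvm::Value (hypothetical types, not the real CodeGen classes):

    #include <cassert>

    // Stand-in for llvm::Value; the real record stores llvm::Value pointers.
    struct Value {};

    struct TargetDataInfo {
      Value *BasePointersArray = nullptr;
      Value *PointersArray = nullptr;
      Value *SizesArray = nullptr;
      Value *MapTypesArray = nullptr;
      unsigned NumberOfPtrs = 0;

      void clearArrayInfo() {
        BasePointersArray = PointersArray = SizesArray = MapTypesArray = nullptr;
        NumberOfPtrs = 0;
      }
      bool isValid() const {
        return BasePointersArray && PointersArray && SizesArray &&
               MapTypesArray && NumberOfPtrs;
      }
    };

    // Pretend region-entry emission: fills the arrays exactly once.
    static void emitRegionEntry(TargetDataInfo &Info, Value *Storage) {
      Info.BasePointersArray = Info.PointersArray = Storage;
      Info.SizesArray = Info.MapTypesArray = Storage;
      Info.NumberOfPtrs = 1;
    }

    int main() {
      Value Arrays;
      TargetDataInfo Info;
      emitRegionEntry(Info, &Arrays);
      assert(Info.isValid()); // the region-exit call may now reuse the arrays
      Info.clearArrayInfo();  // and the record can be recycled afterwards
    }
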
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
index d64f6df72012..451f9e9221ad 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -244,6 +244,9 @@ void CGOpenMPRuntimeNVPTX::emitEntryHeader(CodeGenFunction &CGF,
void CGOpenMPRuntimeNVPTX::emitEntryFooter(CodeGenFunction &CGF,
EntryFunctionState &EST) {
+ if (!EST.ExitBB)
+ EST.ExitBB = CGF.createBasicBlock(".exit");
+
CGBuilderTy &Bld = CGF.Builder;
llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
CGF.EmitBranch(TerminateBB);
@@ -259,6 +262,7 @@ void CGOpenMPRuntimeNVPTX::emitEntryFooter(CodeGenFunction &CGF,
CGF.EmitBranch(EST.ExitBB);
CGF.EmitBlock(EST.ExitBB);
+ EST.ExitBB = nullptr;
}
/// \brief Returns specified OpenMP runtime function for the current OpenMP
@@ -368,6 +372,7 @@ llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOrTeamsOutlinedFunction(
CGOpenMPRuntime::emitParallelOrTeamsOutlinedFunction(
D, ThreadIDVar, InnermostKind, CodeGen);
OutlinedFun = cast<llvm::Function>(OutlinedFunVal);
+ OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);
} else
llvm_unreachable("parallel directive is not yet supported for nvptx "
diff --git a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
index a6c64b2f6d67..e18d28cdda9f 100644
--- a/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ b/lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -25,11 +25,8 @@ namespace CodeGen {
class CGOpenMPRuntimeNVPTX : public CGOpenMPRuntime {
public:
- class EntryFunctionState {
- public:
- llvm::BasicBlock *ExitBB;
-
- EntryFunctionState() : ExitBB(nullptr){};
+ struct EntryFunctionState {
+ llvm::BasicBlock *ExitBB = nullptr;
};
class WorkerFunctionState {
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index d815863e929d..f2acb798b881 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -142,6 +142,8 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::GCCAsmStmtClass: // Intentional fall-through.
case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
case Stmt::CoroutineBodyStmtClass:
+ EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
+ break;
case Stmt::CoreturnStmtClass:
CGM.ErrorUnsupported(S, "coroutine");
break;
@@ -295,6 +297,35 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
EmitOMPTargetParallelForSimdDirective(
cast<OMPTargetParallelForSimdDirective>(*S));
break;
+ case Stmt::OMPTargetSimdDirectiveClass:
+ EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
+ break;
+ case Stmt::OMPTeamsDistributeDirectiveClass:
+ EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
+ break;
+ case Stmt::OMPTeamsDistributeSimdDirectiveClass:
+ EmitOMPTeamsDistributeSimdDirective(
+ cast<OMPTeamsDistributeSimdDirective>(*S));
+ break;
+ case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
+ EmitOMPTeamsDistributeParallelForSimdDirective(
+ cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
+ break;
+ case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
+ EmitOMPTeamsDistributeParallelForDirective(
+ cast<OMPTeamsDistributeParallelForDirective>(*S));
+ break;
+ case Stmt::OMPTargetTeamsDirectiveClass:
+ EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
+ break;
+ case Stmt::OMPTargetTeamsDistributeDirectiveClass:
+ EmitOMPTargetTeamsDistributeDirective(
+ cast<OMPTargetTeamsDistributeDirective>(*S));
+ break;
+ case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
+ EmitOMPTargetTeamsDistributeParallelForDirective(
+ cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
+ break;
}
}
@@ -651,8 +682,10 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
EmitBlock(LoopHeader.getBlock());
+ const SourceRange &R = S.getSourceRange();
LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), WhileAttrs,
- Builder.getCurrentDebugLocation());
+ SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
// Create an exit block for when the condition fails, which will
// also become the break target.
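
The change in this and the following loop emitters is the same: instead of tagging the loop with whatever debug location the builder last saw, the loop metadata now carries the begin and end locations of the statement's own source range, giving tools a stable extent for the loop. A toy model of the new bookkeeping, with stand-in types for the clang/LLVM pieces (hypothetical, illustrative only):

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Stand-ins for SourceLocation/DebugLoc and the loop-metadata stack.
    struct SourceRange { unsigned Begin, End; };
    struct DebugLoc { unsigned Line; };

    static DebugLoc sourceLocToDebugLoc(unsigned Loc) { return DebugLoc{Loc}; }

    struct LoopInfoStack {
      std::vector<std::pair<DebugLoc, DebugLoc>> Loops;
      // Mirrors LoopStack.push(Header, ..., StartLoc, EndLoc): record the
      // loop's source extent rather than the builder's current location.
      void push(DebugLoc Start, DebugLoc End) { Loops.push_back({Start, End}); }
    };

    int main() {
      SourceRange R{12, 40}; // e.g. S.getSourceRange() of a while statement
      LoopInfoStack Stack;
      Stack.push(sourceLocToDebugLoc(R.Begin), sourceLocToDebugLoc(R.End));
      std::printf("loop spans lines %u-%u\n", Stack.Loops[0].first.Line,
                  Stack.Loops[0].second.Line);
    }
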
@@ -743,8 +776,10 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// Emit the body of the loop.
llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ const SourceRange &R = S.getSourceRange();
LoopStack.push(LoopBody, CGM.getContext(), DoAttrs,
- Builder.getCurrentDebugLocation());
+ SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
EmitBlockWithFallThrough(LoopBody, &S);
{
@@ -796,8 +831,6 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
LexicalScope ForScope(*this, S.getSourceRange());
- llvm::DebugLoc DL = Builder.getCurrentDebugLocation();
-
// Evaluate the first part before the loop.
if (S.getInit())
EmitStmt(S.getInit());
@@ -809,7 +842,10 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
llvm::BasicBlock *CondBlock = Continue.getBlock();
EmitBlock(CondBlock);
- LoopStack.push(CondBlock, CGM.getContext(), ForAttrs, DL);
+ const SourceRange &R = S.getSourceRange();
+ LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
+ SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
// If the for loop doesn't have an increment we can just use the
// condition as the continue block. Otherwise we'll need to create
@@ -894,8 +930,6 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
LexicalScope ForScope(*this, S.getSourceRange());
- llvm::DebugLoc DL = Builder.getCurrentDebugLocation();
-
// Evaluate the first pieces before the loop.
EmitStmt(S.getRangeStmt());
EmitStmt(S.getBeginStmt());
@@ -907,7 +941,10 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
EmitBlock(CondBlock);
- LoopStack.push(CondBlock, CGM.getContext(), ForAttrs, DL);
+ const SourceRange &R = S.getSourceRange();
+ LoopStack.push(CondBlock, CGM.getContext(), ForAttrs,
+ SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
@@ -2085,15 +2122,6 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Result->addAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoUnwind);
- if (isa<MSAsmStmt>(&S)) {
- // If the assembly contains any labels, mark the call noduplicate to prevent
- // defining the same ASM label twice (PR23715). This is pretty hacky, but it
- // works.
- if (AsmString.find("__MSASMLABEL_") != std::string::npos)
- Result->addAttribute(llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoDuplicate);
- }
-
// Attach readnone and readonly attributes.
if (!HasSideEffect) {
if (ReadNone)
@@ -2189,7 +2217,7 @@ LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
auto VAT = CurField->getCapturedVLAType();
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
} else {
- EmitInitializerForField(*CurField, LV, *I, None);
+ EmitInitializerForField(*CurField, LV, *I);
}
}
diff --git a/lib/CodeGen/CGStmtOpenMP.cpp b/lib/CodeGen/CGStmtOpenMP.cpp
index d214340bdafe..ba39e1fbd41f 100644
--- a/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/lib/CodeGen/CGStmtOpenMP.cpp
@@ -188,7 +188,7 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, QualType DstType,
auto *RefVal = TmpAddr.getPointer();
TmpAddr = CGF.CreateMemTemp(RefType, Twine(Name) + ".ref");
auto TmpLVal = CGF.MakeAddrLValue(TmpAddr, RefType);
- CGF.EmitScalarInit(RefVal, TmpLVal);
+ CGF.EmitStoreThroughLValue(RValue::get(RefVal), TmpLVal, /*isInit*/ true);
}
return TmpAddr;
@@ -271,7 +271,17 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S) {
// If we are capturing a pointer by copy we don't need to do anything, just
// use the value that we get from the arguments.
if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
- setAddrOfLocalVar(I->getCapturedVar(), GetAddrOfLocalVar(Args[Cnt]));
+ const VarDecl *CurVD = I->getCapturedVar();
+ Address LocalAddr = GetAddrOfLocalVar(Args[Cnt]);
+ // If the variable is a reference we need to materialize it here.
+ if (CurVD->getType()->isReferenceType()) {
+ Address RefAddr = CreateMemTemp(CurVD->getType(), getPointerAlign(),
+ ".materialized_ref");
+ EmitStoreOfScalar(LocalAddr.getPointer(), RefAddr, /*Volatile=*/false,
+ CurVD->getType());
+ LocalAddr = RefAddr;
+ }
+ setAddrOfLocalVar(CurVD, LocalAddr);
++Cnt;
++I;
continue;
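
The materialization is needed because code generated against a reference expects to load the referent's address from memory, while a by-copy pointer capture arrives as a plain value with no backing slot. A plain C++ analogue of the extra indirection the temporary introduces (illustrative, not the emitted IR):

    #include <cstdio>

    static void useThroughReferenceSlot(int **Slot) {
      // Code lowered for an 'int &' capture loads the address from the slot,
      // then dereferences it: two loads instead of one.
      std::printf("%d\n", **Slot);
    }

    int main() {
      int X = 7;
      int *Arg = &X;             // the capture arrives as a pointer argument
      int **Materialized = &Arg; // the ".materialized_ref" temporary's role
      useThroughReferenceSlot(Materialized);
    }
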
@@ -1294,7 +1304,9 @@ void CodeGenFunction::EmitOMPInnerLoop(
// Start the loop with a block that tests the condition.
auto CondBlock = createBasicBlock("omp.inner.for.cond");
EmitBlock(CondBlock);
- LoopStack.push(CondBlock, Builder.getCurrentDebugLocation());
+ const SourceRange &R = S.getSourceRange();
+ LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
@@ -1695,7 +1707,9 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
// Start the loop with a block that tests the condition.
auto CondBlock = createBasicBlock("omp.dispatch.cond");
EmitBlock(CondBlock);
- LoopStack.push(CondBlock, Builder.getCurrentDebugLocation());
+ const SourceRange &R = S.getSourceRange();
+ LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
+ SourceLocToDebugLoc(R.getEnd()));
llvm::Value *BoolCondVal = nullptr;
if (!DynamicOrOrdered) {
@@ -1930,6 +1944,94 @@ void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
});
}
+void CodeGenFunction::EmitOMPTargetSimdDirective(
+ const OMPTargetSimdDirective &S) {
+ OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_target_simd, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ OMPLoopScope PreInitScope(CGF, S);
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
+void CodeGenFunction::EmitOMPTeamsDistributeDirective(
+ const OMPTeamsDistributeDirective &S) {
+ OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_teams_distribute,
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ OMPLoopScope PreInitScope(CGF, S);
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
+void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
+ const OMPTeamsDistributeSimdDirective &S) {
+ OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_teams_distribute_simd,
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ OMPLoopScope PreInitScope(CGF, S);
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
+void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
+ const OMPTeamsDistributeParallelForSimdDirective &S) {
+ OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_teams_distribute_parallel_for_simd,
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ OMPLoopScope PreInitScope(CGF, S);
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
+void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
+ const OMPTeamsDistributeParallelForDirective &S) {
+ OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_teams_distribute_parallel_for,
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ OMPLoopScope PreInitScope(CGF, S);
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsDirective(
+ const OMPTargetTeamsDirective &S) {
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_target_teams, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
+ const OMPTargetTeamsDistributeDirective &S) {
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_target_teams_distribute,
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
+void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
+ const OMPTargetTeamsDistributeParallelForDirective &S) {
+ CGM.getOpenMPRuntime().emitInlinedDirective(
+ *this, OMPD_target_teams_distribute_parallel_for,
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ });
+}
+
/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
const DeclRefExpr *Helper) {
@@ -2167,7 +2269,7 @@ static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
llvm::Value *Init = nullptr) {
auto LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
if (Init)
- CGF.EmitScalarInit(Init, LVal);
+ CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
return LVal;
}
@@ -2451,10 +2553,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
}
// Check if the task has 'priority' clause.
if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
- // Runtime currently does not support codegen for priority clause argument.
- // TODO: Add codegen for priority clause arg when runtime lib support it.
auto *Prio = Clause->getPriority();
- Data.Priority.setInt(Prio);
+ Data.Priority.setInt(/*IntVal=*/true);
Data.Priority.setPointer(EmitScalarConversion(
EmitScalarExpr(Prio), Prio->getType(),
getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
@@ -3368,7 +3468,7 @@ static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
}
void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
- // Emit parallel region as a standalone region.
+ // Emit teams region as a standalone region.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
OMPPrivateScope PrivateScope(CGF);
(void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
@@ -3410,22 +3510,137 @@ CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
return OMPCancelStack.getExitBlock();
}
+void CodeGenFunction::EmitOMPUseDevicePtrClause(
+ const OMPClause &NC, OMPPrivateScope &PrivateScope,
+ const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap) {
+ const auto &C = cast<OMPUseDevicePtrClause>(NC);
+ auto OrigVarIt = C.varlist_begin();
+ auto InitIt = C.inits().begin();
+ for (auto PvtVarIt : C.private_copies()) {
+ auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*OrigVarIt)->getDecl());
+ auto *InitVD = cast<VarDecl>(cast<DeclRefExpr>(*InitIt)->getDecl());
+ auto *PvtVD = cast<VarDecl>(cast<DeclRefExpr>(PvtVarIt)->getDecl());
+
+ // In order to identify the right initializer we need to match the
+ // declaration used by the mapping logic. In some cases we may get
+ // OMPCapturedExprDecl that refers to the original declaration.
+ const ValueDecl *MatchingVD = OrigVD;
+ if (auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
+ // OMPCapturedExprDecls are used to privatize fields of the current
+ // structure.
+ auto *ME = cast<MemberExpr>(OED->getInit());
+ assert(isa<CXXThisExpr>(ME->getBase()) &&
+ "Base should be the current struct!");
+ MatchingVD = ME->getMemberDecl();
+ }
+
+ // If we don't have information about the current list item, move on to
+ // the next one.
+ auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
+ if (InitAddrIt == CaptureDeviceAddrMap.end())
+ continue;
+
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, [&]() -> Address {
+ // Initialize the temporary initialization variable with the address we
+ // get from the runtime library. We have to cast the source address
+ // because it is always a void *. References are materialized in the
+ // privatization scope, so the initialization here disregards the fact
+ // that the original variable is a reference.
+ QualType AddrQTy =
+ getContext().getPointerType(OrigVD->getType().getNonReferenceType());
+ llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
+ Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
+ setAddrOfLocalVar(InitVD, InitAddr);
+
+ // Emit the private declaration; it will be initialized by the
+ // declaration we just added to the local declarations map.
+ EmitDecl(*PvtVD);
+
+ // The initialization variable served its purpose in the emission
+ // of the previous declaration, so we don't need it anymore.
+ LocalDeclMap.erase(InitVD);
+
+ // Return the address of the private variable.
+ return GetAddrOfLocalVar(PvtVD);
+ });
+ assert(IsRegistered && "firstprivate var already registered as private");
+ // Silence the warning about unused variable.
+ (void)IsRegistered;
+
+ ++OrigVarIt;
+ ++InitIt;
+ }
+}
+
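
For orientation, this is the shape of source the new helper supports; inside the region the privatized p holds the device address the runtime handed back. A minimal usage sketch, assuming an OpenMP 4.5 toolchain with an offload target configured:

    #include <cstdlib>

    int main() {
      const int N = 1024;
      float *p = static_cast<float *>(std::malloc(N * sizeof(float)));
    #pragma omp target data map(tofrom : p[0:N]) use_device_ptr(p)
      {
        // Here 'p' is a private copy initialized with the corresponding
        // device pointer, e.g. suitable for passing to a device library.
    #pragma omp target is_device_ptr(p)
        for (int i = 0; i < N; ++i)
          p[i] = static_cast<float>(i);
      }
      std::free(p);
      return 0;
    }
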
// Generate the instructions for '#pragma omp target data' directive.
void CodeGenFunction::EmitOMPTargetDataDirective(
const OMPTargetDataDirective &S) {
- // The target data enclosed region is implemented just by emitting the
- // statement.
- auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
- CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true);
+
+ // Create a pre/post action to signal the privatization of the device pointer.
+ // This action can be replaced by the OpenMP runtime code generation to
+ // deactivate privatization.
+ bool PrivatizeDevicePointers = false;
+ class DevicePointerPrivActionTy : public PrePostActionTy {
+ bool &PrivatizeDevicePointers;
+
+ public:
+ explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
+ : PrePostActionTy(), PrivatizeDevicePointers(PrivatizeDevicePointers) {}
+ void Enter(CodeGenFunction &CGF) override {
+ PrivatizeDevicePointers = true;
+ }
+ };
+ DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
+
+ auto &&CodeGen = [&S, &Info, &PrivatizeDevicePointers](
+ CodeGenFunction &CGF, PrePostActionTy &Action) {
+ auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
+ CGF.EmitStmt(
+ cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
+ };
+
+ // Codegen that selects whether to generate the privatization code or not.
+ auto &&PrivCodeGen = [&S, &Info, &PrivatizeDevicePointers,
+ &InnermostCodeGen](CodeGenFunction &CGF,
+ PrePostActionTy &Action) {
+ RegionCodeGenTy RCG(InnermostCodeGen);
+ PrivatizeDevicePointers = false;
+
+ // Call the pre-action to change the status of PrivatizeDevicePointers if
+ // needed.
+ Action.Enter(CGF);
+
+ if (PrivatizeDevicePointers) {
+ OMPPrivateScope PrivateScope(CGF);
+ // Emit all instances of the use_device_ptr clause.
+ for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
+ CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
+ Info.CaptureDeviceAddrMap);
+ (void)PrivateScope.Privatize();
+ RCG(CGF);
+ } else
+ RCG(CGF);
+ };
+
+ // Forward the provided action to the privatization codegen.
+ RegionCodeGenTy PrivRCG(PrivCodeGen);
+ PrivRCG.setAction(Action);
+
+ // Although the body of the region is emitted as an inlined directive,
+ // we don't use an inline scope, as changes to the references inside the
+ // region are expected to be visible outside, so we do not privatize them.
+ OMPLexicalScope Scope(CGF, S);
+ CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
+ PrivRCG);
};
+ RegionCodeGenTy RCG(CodeGen);
+
// If we don't have target devices, don't bother emitting the data mapping
// code.
if (CGM.getLangOpts().OMPTargetTriples.empty()) {
- OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
-
- CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_target_data,
- CodeGen);
+ RCG(*this);
return;
}
@@ -3439,7 +3654,12 @@ void CodeGenFunction::EmitOMPTargetDataDirective(
if (auto *C = S.getSingleClause<OMPDeviceClause>())
Device = C->getDevice();
- CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, CodeGen);
+ // Set the action to signal privatization of device pointers.
+ RCG.setAction(PrivAction);
+
+ // Emit region code.
+ CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
+ Info);
}
void CodeGenFunction::EmitOMPTargetEnterDataDirective(
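
The indirection above is easy to miss: the emitted body only privatizes device pointers if the runtime emission chose to run the region's pre-action, whose sole effect is flipping PrivatizeDevicePointers. A stripped-down standalone model of that callback handshake (names are hypothetical; only the shape mirrors PrePostActionTy):

    #include <cstdio>

    // Shape-only stand-ins for PrePostActionTy and the privatization action.
    struct Action {
      virtual ~Action() = default;
      virtual void enter() {} // default pre-action: no effect
    };

    struct DevicePointerPrivAction : Action {
      bool &Privatize;
      explicit DevicePointerPrivAction(bool &P) : Privatize(P) {}
      void enter() override { Privatize = true; }
    };

    // Stands in for the runtime emission deciding whether to run the action.
    static void emitRegionBody(Action &A, bool RunPreAction, bool &Privatize) {
      Privatize = false;
      if (RunPreAction)
        A.enter();
      std::printf("privatize device pointers: %s\n", Privatize ? "yes" : "no");
    }

    int main() {
      bool Privatize = false;
      DevicePointerPrivAction Priv(Privatize);
      emitRegionBody(Priv, /*RunPreAction=*/true, Privatize);  // prints: yes
      emitRegionBody(Priv, /*RunPreAction=*/false, Privatize); // prints: no
    }
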
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 5b90ee603307..92fd93b5ca38 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -23,7 +23,7 @@ GetAddrOfVTTVTable(CodeGenVTables &CGVT, CodeGenModule &CGM,
const CXXRecordDecl *MostDerivedClass,
const VTTVTable &VTable,
llvm::GlobalVariable::LinkageTypes Linkage,
- llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
+ VTableLayout::AddressPointsMapTy &AddressPoints) {
if (VTable.getBase() == MostDerivedClass) {
assert(VTable.getBaseOffset().isZero() &&
"Most derived class vtable must have a zero offset!");
@@ -62,25 +62,27 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
*e = Builder.getVTTComponents().end(); i != e; ++i) {
const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex];
llvm::GlobalVariable *VTable = VTables[i->VTableIndex];
- uint64_t AddressPoint;
+ VTableLayout::AddressPointLocation AddressPoint;
if (VTTVT.getBase() == RD) {
// Just get the address point for the regular vtable.
AddressPoint =
getItaniumVTableContext().getVTableLayout(RD).getAddressPoint(
i->VTableBase);
- assert(AddressPoint != 0 && "Did not find vtable address point!");
} else {
AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
- assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
+ assert(AddressPoint.AddressPointIndex != 0 &&
+ "Did not find ctor vtable address point!");
}
llvm::Value *Idxs[] = {
llvm::ConstantInt::get(Int32Ty, 0),
- llvm::ConstantInt::get(Int32Ty, AddressPoint)
+ llvm::ConstantInt::get(Int32Ty, AddressPoint.VTableIndex),
+ llvm::ConstantInt::get(Int32Ty, AddressPoint.AddressPointIndex),
};
- llvm::Constant *Init = llvm::ConstantExpr::getInBoundsGetElementPtr(
- VTable->getValueType(), VTable, Idxs);
+ llvm::Constant *Init = llvm::ConstantExpr::getGetElementPtr(
+ VTable->getValueType(), VTable, Idxs, /*InBounds=*/true,
+ /*InRangeIndex=*/1);
Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 957055033890..1a09830b52fd 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -11,16 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
#include "CGCXXABI.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
-#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <algorithm>
@@ -32,7 +30,7 @@ using namespace CodeGen;
CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
: CGM(CGM), VTContext(CGM.getContext().getVTableContext()) {}
-llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
const ThunkInfo &Thunk) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
@@ -96,7 +94,7 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
AdjustNull = CGF.createBasicBlock("adjust.null");
AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
AdjustEnd = CGF.createBasicBlock("adjust.end");
-
+
llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
CGF.EmitBlock(AdjustNotNull);
@@ -113,14 +111,14 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
CGF.EmitBlock(AdjustNull);
CGF.Builder.CreateBr(AdjustEnd);
CGF.EmitBlock(AdjustEnd);
-
+
llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
PHI->addIncoming(ReturnValue, AdjustNotNull);
- PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
AdjustNull);
ReturnValue = PHI;
}
-
+
return RValue::get(ReturnValue);
}
@@ -232,8 +230,11 @@ void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
CGM.getCXXABI().addImplicitStructorParams(*this, ResultType, FunctionArgs);
// Start defining the function.
+ auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
- MD->getLocation(), MD->getLocation());
+ MD->getLocation());
+ // Create a scope with an artificial location for the body of this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
// Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
@@ -251,7 +252,7 @@ void CodeGenFunction::FinishThunk() {
FinishFunction();
}
-void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
+void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Constant *CalleePtr,
const ThunkInfo *Thunk) {
assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
"Please use a new CGF for this thunk");
@@ -271,7 +272,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
CGM.ErrorUnsupported(
MD, "non-trivial argument copy for return-adjusting thunk");
}
- EmitMustTailThunk(MD, AdjustedThisPtr, Callee);
+ EmitMustTailThunk(MD, AdjustedThisPtr, CalleePtr);
return;
}
@@ -285,7 +286,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
// Add the rest of the arguments.
for (const ParmVarDecl *PD : MD->parameters())
- EmitDelegateCallArg(CallArgs, PD, PD->getLocStart());
+ EmitDelegateCallArg(CallArgs, PD, SourceLocation());
const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
@@ -317,10 +318,11 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType()))
Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
-
+
// Now emit our call.
llvm::Instruction *CallOrInvoke;
- RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD, &CallOrInvoke);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, MD);
+ RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, &CallOrInvoke);
// Consider return adjustment if we have ThunkInfo.
if (Thunk && !Thunk->Return.isEmpty())
@@ -340,7 +342,7 @@ void CodeGenFunction::EmitCallAndReturnForThunk(llvm::Value *Callee,
void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
llvm::Value *AdjustedThisPtr,
- llvm::Value *Callee) {
+ llvm::Value *CalleePtr) {
// Emitting a musttail call thunk doesn't use any of the CGCall.cpp machinery
// to translate AST arguments into LLVM IR arguments. For thunks, we know
// that the caller prototype more or less matches the callee prototype with
@@ -369,13 +371,14 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
// Emit the musttail call manually. Even if the prologue pushed cleanups, we
// don't actually want to run them.
- llvm::CallInst *Call = Builder.CreateCall(Callee, Args);
+ llvm::CallInst *Call = Builder.CreateCall(CalleePtr, Args);
Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
// Apply the standard set of call attributes.
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(Callee->getName(), *CurFnInfo, MD, AttributeList,
+ CGM.ConstructAttributeList(CalleePtr->getName(),
+ *CurFnInfo, MD, AttributeList,
CallingConv, /*AttrOnCallSite=*/true);
llvm::AttributeSet Attrs =
llvm::AttributeSet::get(getLLVMContext(), AttributeList);
@@ -397,11 +400,13 @@ void CodeGenFunction::generateThunk(llvm::Function *Fn,
const CGFunctionInfo &FnInfo,
GlobalDecl GD, const ThunkInfo &Thunk) {
StartThunk(Fn, GD, FnInfo);
+ // Create a scope with an artificial location for the body of this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(*this);
// Get our callee.
llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
- llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ llvm::Constant *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
// Make the call and return the result.
EmitCallAndReturnForThunk(Callee, &Thunk);
@@ -436,14 +441,14 @@ void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
// Remove the name from the old thunk function and get a new thunk.
OldThunkFn->setName(StringRef());
Entry = cast<llvm::GlobalValue>(CGM.GetAddrOfThunk(GD, Thunk));
-
+
// If needed, replace the old thunk with a bitcast.
if (!OldThunkFn->use_empty()) {
llvm::Constant *NewPtrForOldDecl =
llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
}
-
+
// Remove the old thunk.
OldThunkFn->eraseFromParent();
}
@@ -503,7 +508,7 @@ void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
void CodeGenVTables::EmitThunks(GlobalDecl GD)
{
- const CXXMethodDecl *MD =
+ const CXXMethodDecl *MD =
cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
// We don't need to generate thunks for the base destructor.
@@ -520,146 +525,146 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
emitThunk(GD, Thunk, /*ForVTable=*/false);
}
-llvm::Constant *CodeGenVTables::CreateVTableInitializer(
- const CXXRecordDecl *RD, const VTableComponent *Components,
- unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks,
- unsigned NumVTableThunks, llvm::Constant *RTTI) {
- SmallVector<llvm::Constant *, 64> Inits;
+void CodeGenVTables::addVTableComponent(
+ ConstantArrayBuilder &builder, const VTableLayout &layout,
+ unsigned idx, llvm::Constant *rtti, unsigned &nextVTableThunkIndex) {
+ auto &component = layout.vtable_components()[idx];
- llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
-
- llvm::Type *PtrDiffTy =
- CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+ auto addOffsetConstant = [&](CharUnits offset) {
+ builder.add(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(CGM.PtrDiffTy, offset.getQuantity()),
+ CGM.Int8PtrTy));
+ };
- unsigned NextVTableThunkIndex = 0;
+ switch (component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ return addOffsetConstant(component.getVCallOffset());
- llvm::Constant *PureVirtualFn = nullptr, *DeletedVirtualFn = nullptr;
+ case VTableComponent::CK_VBaseOffset:
+ return addOffsetConstant(component.getVBaseOffset());
- for (unsigned I = 0; I != NumComponents; ++I) {
- VTableComponent Component = Components[I];
+ case VTableComponent::CK_OffsetToTop:
+ return addOffsetConstant(component.getOffsetToTop());
- llvm::Constant *Init = nullptr;
+ case VTableComponent::CK_RTTI:
+ return builder.add(llvm::ConstantExpr::getBitCast(rtti, CGM.Int8PtrTy));
- switch (Component.getKind()) {
- case VTableComponent::CK_VCallOffset:
- Init = llvm::ConstantInt::get(PtrDiffTy,
- Component.getVCallOffset().getQuantity());
- Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
- break;
- case VTableComponent::CK_VBaseOffset:
- Init = llvm::ConstantInt::get(PtrDiffTy,
- Component.getVBaseOffset().getQuantity());
- Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ case VTableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ GlobalDecl GD;
+
+ // Get the right global decl.
+ switch (component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind");
+ case VTableComponent::CK_FunctionPointer:
+ GD = component.getFunctionDecl();
break;
- case VTableComponent::CK_OffsetToTop:
- Init = llvm::ConstantInt::get(PtrDiffTy,
- Component.getOffsetToTop().getQuantity());
- Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ case VTableComponent::CK_CompleteDtorPointer:
+ GD = GlobalDecl(component.getDestructorDecl(), Dtor_Complete);
break;
- case VTableComponent::CK_RTTI:
- Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
+ case VTableComponent::CK_DeletingDtorPointer:
+ GD = GlobalDecl(component.getDestructorDecl(), Dtor_Deleting);
break;
- case VTableComponent::CK_FunctionPointer:
- case VTableComponent::CK_CompleteDtorPointer:
- case VTableComponent::CK_DeletingDtorPointer: {
- GlobalDecl GD;
-
- // Get the right global decl.
- switch (Component.getKind()) {
- default:
- llvm_unreachable("Unexpected vtable component kind");
- case VTableComponent::CK_FunctionPointer:
- GD = Component.getFunctionDecl();
- break;
- case VTableComponent::CK_CompleteDtorPointer:
- GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
- break;
- case VTableComponent::CK_DeletingDtorPointer:
- GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
- break;
- }
-
- if (CGM.getLangOpts().CUDA) {
- // Emit NULL for methods we can't codegen on this
- // side. Otherwise we'd end up with vtable with unresolved
- // references.
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- // OK on device side: functions w/ __device__ attribute
- // OK on host side: anything except __device__-only functions.
- bool CanEmitMethod = CGM.getLangOpts().CUDAIsDevice
- ? MD->hasAttr<CUDADeviceAttr>()
- : (MD->hasAttr<CUDAHostAttr>() ||
- !MD->hasAttr<CUDADeviceAttr>());
- if (!CanEmitMethod) {
- Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
- break;
- }
- // Method is acceptable, continue processing as usual.
- }
+ }
- if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
- // We have a pure virtual member function.
- if (!PureVirtualFn) {
- llvm::FunctionType *Ty =
- llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- StringRef PureCallName = CGM.getCXXABI().GetPureVirtualCallName();
- PureVirtualFn = CGM.CreateRuntimeFunction(Ty, PureCallName);
- if (auto *F = dyn_cast<llvm::Function>(PureVirtualFn))
- F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
- CGM.Int8PtrTy);
- }
- Init = PureVirtualFn;
- } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
- if (!DeletedVirtualFn) {
- llvm::FunctionType *Ty =
- llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
- StringRef DeletedCallName =
- CGM.getCXXABI().GetDeletedVirtualCallName();
- DeletedVirtualFn = CGM.CreateRuntimeFunction(Ty, DeletedCallName);
- if (auto *F = dyn_cast<llvm::Function>(DeletedVirtualFn))
- F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- DeletedVirtualFn = llvm::ConstantExpr::getBitCast(DeletedVirtualFn,
- CGM.Int8PtrTy);
- }
- Init = DeletedVirtualFn;
- } else {
- // Check if we should use a thunk.
- if (NextVTableThunkIndex < NumVTableThunks &&
- VTableThunks[NextVTableThunkIndex].first == I) {
- const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
-
- maybeEmitThunkForVTable(GD, Thunk);
- Init = CGM.GetAddrOfThunk(GD, Thunk);
-
- NextVTableThunkIndex++;
- } else {
- llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
-
- Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
- }
-
- Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
- }
- break;
+ if (CGM.getLangOpts().CUDA) {
+ // Emit NULL for methods we can't codegen on this
+ // side. Otherwise we'd end up with a vtable with unresolved
+ // references.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ // OK on device side: functions w/ __device__ attribute
+ // OK on host side: anything except __device__-only functions.
+ bool CanEmitMethod =
+ CGM.getLangOpts().CUDAIsDevice
+ ? MD->hasAttr<CUDADeviceAttr>()
+ : (MD->hasAttr<CUDAHostAttr>() || !MD->hasAttr<CUDADeviceAttr>());
+ if (!CanEmitMethod)
+ return builder.addNullPointer(CGM.Int8PtrTy);
+ // Method is acceptable, continue processing as usual.
}
- case VTableComponent::CK_UnusedFunctionPointer:
- Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
- break;
+ auto getSpecialVirtualFn = [&](StringRef name) {
+ llvm::FunctionType *fnTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::Constant *fn = CGM.CreateRuntimeFunction(fnTy, name);
+ if (auto f = dyn_cast<llvm::Function>(fn))
+ f->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ return llvm::ConstantExpr::getBitCast(fn, CGM.Int8PtrTy);
};
-
- Inits.push_back(Init);
+
+ llvm::Constant *fnPtr;
+
+ // Pure virtual member functions.
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ if (!PureVirtualFn)
+ PureVirtualFn =
+ getSpecialVirtualFn(CGM.getCXXABI().GetPureVirtualCallName());
+ fnPtr = PureVirtualFn;
+
+ // Deleted virtual member functions.
+ } else if (cast<CXXMethodDecl>(GD.getDecl())->isDeleted()) {
+ if (!DeletedVirtualFn)
+ DeletedVirtualFn =
+ getSpecialVirtualFn(CGM.getCXXABI().GetDeletedVirtualCallName());
+ fnPtr = DeletedVirtualFn;
+
+ // Thunks.
+ } else if (nextVTableThunkIndex < layout.vtable_thunks().size() &&
+ layout.vtable_thunks()[nextVTableThunkIndex].first == idx) {
+ auto &thunkInfo = layout.vtable_thunks()[nextVTableThunkIndex].second;
+
+ maybeEmitThunkForVTable(GD, thunkInfo);
+ nextVTableThunkIndex++;
+ fnPtr = CGM.GetAddrOfThunk(GD, thunkInfo);
+
+ // Otherwise we can use the method definition directly.
+ } else {
+ llvm::Type *fnTy = CGM.getTypes().GetFunctionTypeForVTable(GD);
+ fnPtr = CGM.GetAddrOfFunction(GD, fnTy, /*ForVTable=*/true);
+ }
+
+ fnPtr = llvm::ConstantExpr::getBitCast(fnPtr, CGM.Int8PtrTy);
+ builder.add(fnPtr);
+ return;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer:
+ return builder.addNullPointer(CGM.Int8PtrTy);
+ }
+
+ llvm_unreachable("Unexpected vtable component kind");
+}
+
+llvm::Type *CodeGenVTables::getVTableType(const VTableLayout &layout) {
+ SmallVector<llvm::Type *, 4> tys;
+ for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
+ tys.push_back(llvm::ArrayType::get(CGM.Int8PtrTy, layout.getVTableSize(i)));
+ }
+
+ return llvm::StructType::get(CGM.getLLVMContext(), tys);
+}
+
+void CodeGenVTables::createVTableInitializer(ConstantStructBuilder &builder,
+ const VTableLayout &layout,
+ llvm::Constant *rtti) {
+ unsigned nextVTableThunkIndex = 0;
+ for (unsigned i = 0, e = layout.getNumVTables(); i != e; ++i) {
+ auto vtableElem = builder.beginArray(CGM.Int8PtrTy);
+ size_t thisIndex = layout.getVTableOffset(i);
+ size_t nextIndex = thisIndex + layout.getVTableSize(i);
+ for (unsigned i = thisIndex; i != nextIndex; ++i) {
+ addVTableComponent(vtableElem, layout, i, rtti, nextVTableThunkIndex);
+ }
+ vtableElem.finishAndAddTo(builder);
}
-
- llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
- return llvm::ConstantArray::get(ArrayType, Inits);
}
llvm::GlobalVariable *
-CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
- const BaseSubobject &Base,
- bool BaseIsVirtual,
+CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base,
+ bool BaseIsVirtual,
llvm::GlobalVariable::LinkageTypes Linkage,
VTableAddressPointsMapTy& AddressPoints) {
if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
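
The net effect of getVTableType plus createVTableInitializer is that a vtable group is now an LLVM struct of i8* arrays, one array per vtable, so a slot is addressed by (vtable index, component index) rather than a flat offset. A hedged sketch of constructing such a type directly with the LLVM C++ API (the sizes are made up):

    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include <vector>

    int main() {
      llvm::LLVMContext Ctx;
      llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(Ctx);
      // Hypothetical group: a primary vtable with 5 slots, a secondary with 3.
      std::vector<llvm::Type *> Tys;
      for (unsigned Size : {5u, 3u})
        Tys.push_back(llvm::ArrayType::get(Int8PtrTy, Size));
      llvm::StructType *VTableTy = llvm::StructType::get(Ctx, Tys);
      // An address point is now a GEP of the form {0, VTableIndex, SlotIndex}
      // into VTableTy, which is what the new CGVTT code emits.
      (void)VTableTy;
      return 0;
    }
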
@@ -680,8 +685,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
Base.getBase(), Out);
StringRef Name = OutName.str();
- llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());
+ llvm::Type *VTType = getVTableType(*VTLayout);
// Construction vtable symbols are not part of the Itanium ABI, so we cannot
// guarantee that they actually will be available externally. Instead, when
@@ -692,8 +696,8 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
Linkage = llvm::GlobalVariable::InternalLinkage;
// Create the variable that will hold the construction vtable.
- llvm::GlobalVariable *VTable =
- CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
+ llvm::GlobalVariable *VTable =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, VTType, Linkage);
CGM.setGlobalVisibility(VTable, RD);
// V-tables are always unnamed_addr.
@@ -703,12 +707,11 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
CGM.getContext().getTagDeclType(Base.getBase()));
// Create and set the initializer.
- llvm::Constant *Init = CreateVTableInitializer(
- Base.getBase(), VTLayout->vtable_component_begin(),
- VTLayout->getNumVTableComponents(), VTLayout->vtable_thunk_begin(),
- VTLayout->getNumVTableThunks(), RTTI);
- VTable->setInitializer(Init);
-
+ ConstantInitBuilder builder(CGM);
+ auto components = builder.beginStruct();
+ createVTableInitializer(components, *VTLayout, RTTI);
+ components.finishAndSetAsInitializer(VTable);
+
CGM.EmitVTableTypeMetadata(VTable, *VTLayout.get());
return VTable;
@@ -723,7 +726,7 @@ static bool shouldEmitAvailableExternallyVTable(const CodeGenModule &CGM,
/// Compute the required linkage of the vtable for the given class.
///
/// Note that we only call this at the end of the translation unit.
-llvm::GlobalVariable::LinkageTypes
+llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
if (!RD->isExternallyVisible())
return llvm::GlobalVariable::InternalLinkage;
@@ -737,7 +740,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
const FunctionDecl *def = nullptr;
if (keyFunction->hasBody(def))
keyFunction = cast<CXXMethodDecl>(def);
-
+
switch (keyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
@@ -751,7 +754,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
llvm::Function::InternalLinkage;
-
+
return llvm::GlobalVariable::ExternalLinkage;
case TSK_ImplicitInstantiation:
@@ -763,7 +766,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::WeakODRLinkage :
llvm::Function::InternalLinkage;
-
+
case TSK_ExplicitInstantiationDeclaration:
llvm_unreachable("Should not have been asked to emit this");
}
@@ -819,7 +822,7 @@ void CodeGenModule::EmitVTable(CXXRecordDecl *theClass) {
VTables.GenerateClassData(theClass);
}
-void
+void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
DI->completeClassData(RD);
@@ -949,7 +952,10 @@ void CodeGenModule::EmitVTableTypeMetadata(llvm::GlobalVariable *VTable,
std::vector<BSEntry> BitsetEntries;
// Create a bit set entry for each address point.
for (auto &&AP : VTLayout.getAddressPoints())
- BitsetEntries.push_back(std::make_pair(AP.first.getBase(), AP.second));
+ BitsetEntries.push_back(
+ std::make_pair(AP.first.getBase(),
+ VTLayout.getVTableOffset(AP.second.VTableIndex) +
+ AP.second.AddressPointIndex));
// Sort the bit set entries for determinism.
std::sort(BitsetEntries.begin(), BitsetEntries.end(),
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index c27e54af258d..b92212c368a9 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -27,6 +27,8 @@ namespace clang {
namespace CodeGen {
class CodeGenModule;
+ class ConstantArrayBuilder;
+ class ConstantStructBuilder;
class CodeGenVTables {
CodeGenModule &CGM;
@@ -34,7 +36,7 @@ class CodeGenVTables {
VTableContextBase *VTContext;
/// VTableAddressPointsMapTy - Address points for a single vtable.
- typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
+ typedef VTableLayout::AddressPointsMapTy VTableAddressPointsMapTy;
typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
@@ -49,6 +51,12 @@ class CodeGenVTables {
/// indices.
SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
+ /// Cache for the pure virtual member call function.
+ llvm::Constant *PureVirtualFn = nullptr;
+
+ /// Cache for the deleted virtual member call function.
+ llvm::Constant *DeletedVirtualFn = nullptr;
+
/// emitThunk - Emit a single thunk.
void emitThunk(GlobalDecl GD, const ThunkInfo &Thunk, bool ForVTable);
@@ -56,15 +64,17 @@ class CodeGenVTables {
/// the ABI.
void maybeEmitThunkForVTable(GlobalDecl GD, const ThunkInfo &Thunk);
+ void addVTableComponent(ConstantArrayBuilder &builder,
+ const VTableLayout &layout, unsigned idx,
+ llvm::Constant *rtti,
+ unsigned &nextVTableThunkIndex);
+
public:
- /// CreateVTableInitializer - Create a vtable initializer for the given record
- /// decl.
- /// \param Components - The vtable components; this is really an array of
- /// VTableComponents.
- llvm::Constant *CreateVTableInitializer(
- const CXXRecordDecl *RD, const VTableComponent *Components,
- unsigned NumComponents, const VTableLayout::VTableThunkTy *VTableThunks,
- unsigned NumVTableThunks, llvm::Constant *RTTI);
+ /// Add vtable components for the given vtable layout to the given
+ /// global initializer.
+ void createVTableInitializer(ConstantStructBuilder &builder,
+ const VTableLayout &layout,
+ llvm::Constant *rtti);
CodeGenVTables(CodeGenModule &CGM);
@@ -112,6 +122,11 @@ public:
void GenerateClassData(const CXXRecordDecl *RD);
bool isVTableExternal(const CXXRecordDecl *RD);
+
+ /// Returns the type of a vtable with the given layout. Normally a struct of
+ /// arrays of pointers, with one struct element for each vtable in the vtable
+ /// group.
+ llvm::Type *getVTableType(const VTableLayout &layout);
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 257dec9629b2..4fbf9f22a98d 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -3,15 +3,18 @@ set(LLVM_LINK_COMPONENTS
BitReader
BitWriter
Core
+ Coroutines
Coverage
IPO
IRReader
InstCombine
Instrumentation
+ LTO
Linker
MC
ObjCARCOpts
Object
+ Passes
ProfileData
ScalarOpts
Support
@@ -41,6 +44,7 @@ add_clang_library(clangCodeGen
CGCall.cpp
CGClass.cpp
CGCleanup.cpp
+ CGCoroutine.cpp
CGDebugInfo.cpp
CGDecl.cpp
CGDeclCXX.cpp
@@ -79,12 +83,14 @@ add_clang_library(clangCodeGen
SanitizerMetadata.cpp
SwiftCallingConv.cpp
TargetInfo.cpp
+ VarBypassDetector.cpp
DEPENDS
${codegen_deps}
LINK_LIBS
+ clangAnalysis
 clangAST
clangBasic
clangFrontend
clangLex
diff --git a/lib/CodeGen/CodeGenAction.cpp b/lib/CodeGen/CodeGenAction.cpp
index 49738a20f493..1e17918df4a4 100644
--- a/lib/CodeGen/CodeGenAction.cpp
+++ b/lib/CodeGen/CodeGenAction.cpp
@@ -21,8 +21,7 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
@@ -34,6 +33,8 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/YAMLTraits.h"
#include <memory>
using namespace clang;
using namespace llvm;
@@ -50,6 +51,12 @@ namespace clang {
ASTContext *Context;
Timer LLVMIRGeneration;
+ unsigned LLVMIRGenerationRefCount;
+
+ /// True if we've finished generating IR. This prevents us from generating
+ /// additional LLVM IR after emitting output in HandleTranslationUnit. This
+ /// can happen when Clang plugins trigger additional AST deserialization.
+ bool IRGenFinished = false;
std::unique_ptr<CodeGenerator> Gen;
@@ -73,7 +80,8 @@ namespace clang {
: Diags(Diags), Action(Action), CodeGenOpts(CodeGenOpts),
TargetOpts(TargetOpts), LangOpts(LangOpts),
AsmOutStream(std::move(OS)), Context(nullptr),
- LLVMIRGeneration("LLVM IR Generation Time"),
+ LLVMIRGeneration("irgen", "LLVM IR Generation Time"),
+ LLVMIRGenerationRefCount(0),
Gen(CreateLLVMCodeGen(Diags, InFile, HeaderSearchOpts, PPOpts,
CodeGenOpts, C, CoverageInfo)) {
llvm::TimePassesIsEnabled = TimePasses;
@@ -113,13 +121,20 @@ namespace clang {
Context->getSourceManager(),
"LLVM IR generation of declaration");
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.startTimer();
+ // Recurse.
+ if (llvm::TimePassesIsEnabled) {
+ LLVMIRGenerationRefCount += 1;
+ if (LLVMIRGenerationRefCount == 1)
+ LLVMIRGeneration.startTimer();
+ }
Gen->HandleTopLevelDecl(D);
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.stopTimer();
+ if (llvm::TimePassesIsEnabled) {
+ LLVMIRGenerationRefCount -= 1;
+ if (LLVMIRGenerationRefCount == 0)
+ LLVMIRGeneration.stopTimer();
+ }
return true;
}
@@ -137,16 +152,30 @@ namespace clang {
LLVMIRGeneration.stopTimer();
}
+ void HandleInterestingDecl(DeclGroupRef D) override {
+ // Ignore interesting decls from the AST reader after IRGen is finished.
+ if (!IRGenFinished)
+ HandleTopLevelDecl(D);
+ }
+
void HandleTranslationUnit(ASTContext &C) override {
{
PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.startTimer();
+ if (llvm::TimePassesIsEnabled) {
+ LLVMIRGenerationRefCount += 1;
+ if (LLVMIRGenerationRefCount == 1)
+ LLVMIRGeneration.startTimer();
+ }
Gen->HandleTranslationUnit(C);
- if (llvm::TimePassesIsEnabled)
- LLVMIRGeneration.stopTimer();
+ if (llvm::TimePassesIsEnabled) {
+ LLVMIRGenerationRefCount -= 1;
+ if (LLVMIRGenerationRefCount == 0)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ IRGenFinished = true;
}
// Silently ignore if we weren't initialized for some reason.
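
The counter replaces a plain start/stop pair because HandleTopLevelDecl can be re-entered (for example, IR generation can trigger deserialization of more declarations), and llvm::Timer must not be started twice. A small standalone analogue of the guard (toy timer, not llvm::Timer):

    #include <cstdio>

    struct Timer {
      void start() { std::puts("timer started"); }
      void stop() { std::puts("timer stopped"); }
    };

    // Mirrors LLVMIRGenerationRefCount: only the outermost entry/exit touches
    // the underlying timer, so re-entrant calls are harmless.
    class RefCountedTimer {
      Timer &T;
      unsigned Count = 0;

    public:
      explicit RefCountedTimer(Timer &T) : T(T) {}
      void enter() {
        if (++Count == 1)
          T.start();
      }
      void leave() {
        if (--Count == 0)
          T.stop();
      }
    };

    int main() {
      Timer T;
      RefCountedTimer RC(T);
      RC.enter(); // starts the timer
      RC.enter(); // nested entry: no-op
      RC.leave(); // nested exit: no-op
      RC.leave(); // stops the timer
    }
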
@@ -165,6 +194,26 @@ namespace clang {
Ctx.getDiagnosticHandler();
void *OldDiagnosticContext = Ctx.getDiagnosticContext();
Ctx.setDiagnosticHandler(DiagnosticHandler, this);
+ Ctx.setDiagnosticHotnessRequested(CodeGenOpts.DiagnosticsWithHotness);
+
+ std::unique_ptr<llvm::tool_output_file> OptRecordFile;
+ if (!CodeGenOpts.OptRecordFile.empty()) {
+ std::error_code EC;
+ OptRecordFile =
+ llvm::make_unique<llvm::tool_output_file>(CodeGenOpts.OptRecordFile,
+ EC, sys::fs::F_None);
+ if (EC) {
+ Diags.Report(diag::err_cannot_open_file) <<
+ CodeGenOpts.OptRecordFile << EC.message();
+ return;
+ }
+
+ Ctx.setDiagnosticsOutputFile(
+ llvm::make_unique<yaml::Output>(OptRecordFile->os()));
+
+ if (CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
+ Ctx.setDiagnosticHotnessRequested(true);
+ }
// Link LinkModule into this module if present, preserving its validity.
for (auto &I : LinkModules) {
@@ -183,6 +232,9 @@ namespace clang {
Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
Ctx.setDiagnosticHandler(OldDiagnosticHandler, OldDiagnosticContext);
+
+ if (OptRecordFile)
+ OptRecordFile->keep();
}
void HandleTagDeclDefinition(TagDecl *D) override {
@@ -245,16 +297,13 @@ namespace clang {
/// them.
void EmitOptimizationMessage(const llvm::DiagnosticInfoOptimizationBase &D,
unsigned DiagID);
- void
- OptimizationRemarkHandler(const llvm::DiagnosticInfoOptimizationRemark &D);
- void OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkMissed &D);
- void OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D);
+ void OptimizationRemarkHandler(const llvm::OptimizationRemark &D);
+ void OptimizationRemarkHandler(const llvm::OptimizationRemarkMissed &D);
+ void OptimizationRemarkHandler(const llvm::OptimizationRemarkAnalysis &D);
void OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkAnalysisFPCommute &D);
+ const llvm::OptimizationRemarkAnalysisFPCommute &D);
void OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkAnalysisAliasing &D);
+ const llvm::OptimizationRemarkAnalysisAliasing &D);
void OptimizationFailureHandler(
const llvm::DiagnosticInfoOptimizationFailure &D);
};
@@ -497,9 +546,16 @@ void BackendConsumer::EmitOptimizationMessage(
FullSourceLoc Loc = getBestLocationFromDebugLoc(D, BadDebugInfo, Filename,
Line, Column);
+ std::string Msg;
+ raw_string_ostream MsgStream(Msg);
+ MsgStream << D.getMsg();
+
+ if (D.getHotness())
+ MsgStream << " (hotness: " << *D.getHotness() << ")";
+
Diags.Report(Loc, DiagID)
- << AddFlagValue(D.getPassName() ? D.getPassName() : "")
- << D.getMsg().str();
+ << AddFlagValue(D.getPassName())
+ << MsgStream.str();
if (BadDebugInfo)
// If we were not able to translate the file:line:col information
@@ -511,7 +567,7 @@ void BackendConsumer::EmitOptimizationMessage(
}
void BackendConsumer::OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemark &D) {
+ const llvm::OptimizationRemark &D) {
// Optimization remarks are active only if the -Rpass flag has a regular
// expression that matches the name of the pass name in \p D.
if (CodeGenOpts.OptimizationRemarkPattern &&
@@ -520,7 +576,7 @@ void BackendConsumer::OptimizationRemarkHandler(
}
void BackendConsumer::OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkMissed &D) {
+ const llvm::OptimizationRemarkMissed &D) {
// Missed optimization remarks are active only if the -Rpass-missed
// flag has a regular expression that matches the name of the pass
// name in \p D.
@@ -531,7 +587,7 @@ void BackendConsumer::OptimizationRemarkHandler(
}
void BackendConsumer::OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkAnalysis &D) {
+ const llvm::OptimizationRemarkAnalysis &D) {
// Optimization analysis remarks are active if the pass name is set to
 // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
// regular expression that matches the name of the pass name in \p D.
@@ -544,7 +600,7 @@ void BackendConsumer::OptimizationRemarkHandler(
}
void BackendConsumer::OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkAnalysisFPCommute &D) {
+ const llvm::OptimizationRemarkAnalysisFPCommute &D) {
// Optimization analysis remarks are active if the pass name is set to
 // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
// regular expression that matches the name of the pass name in \p D.
@@ -557,7 +613,7 @@ void BackendConsumer::OptimizationRemarkHandler(
}
void BackendConsumer::OptimizationRemarkHandler(
- const llvm::DiagnosticInfoOptimizationRemarkAnalysisAliasing &D) {
+ const llvm::OptimizationRemarkAnalysisAliasing &D) {
// Optimization analysis remarks are active if the pass name is set to
 // llvm::DiagnosticInfo::AlwaysPrint or if the -Rpass-analysis flag has a
// regular expression that matches the name of the pass name in \p D.
@@ -601,30 +657,27 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
case llvm::DK_OptimizationRemark:
// Optimization remarks are always handled completely by this
// handler. There is no generic way of emitting them.
- OptimizationRemarkHandler(cast<DiagnosticInfoOptimizationRemark>(DI));
+ OptimizationRemarkHandler(cast<OptimizationRemark>(DI));
return;
case llvm::DK_OptimizationRemarkMissed:
// Optimization remarks are always handled completely by this
// handler. There is no generic way of emitting them.
- OptimizationRemarkHandler(cast<DiagnosticInfoOptimizationRemarkMissed>(DI));
+ OptimizationRemarkHandler(cast<OptimizationRemarkMissed>(DI));
return;
case llvm::DK_OptimizationRemarkAnalysis:
// Optimization remarks are always handled completely by this
// handler. There is no generic way of emitting them.
- OptimizationRemarkHandler(
- cast<DiagnosticInfoOptimizationRemarkAnalysis>(DI));
+ OptimizationRemarkHandler(cast<OptimizationRemarkAnalysis>(DI));
return;
case llvm::DK_OptimizationRemarkAnalysisFPCommute:
// Optimization remarks are always handled completely by this
// handler. There is no generic way of emitting them.
- OptimizationRemarkHandler(
- cast<DiagnosticInfoOptimizationRemarkAnalysisFPCommute>(DI));
+ OptimizationRemarkHandler(cast<OptimizationRemarkAnalysisFPCommute>(DI));
return;
case llvm::DK_OptimizationRemarkAnalysisAliasing:
// Optimization remarks are always handled completely by this
// handler. There is no generic way of emitting them.
- OptimizationRemarkHandler(
- cast<DiagnosticInfoOptimizationRemarkAnalysisAliasing>(DI));
+ OptimizationRemarkHandler(cast<OptimizationRemarkAnalysisAliasing>(DI));
return;
case llvm::DK_OptimizationFailure:
// Optimization failures are always handled completely by this
@@ -732,11 +785,13 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return nullptr;
}
- ErrorOr<std::unique_ptr<llvm::Module>> ModuleOrErr =
- getLazyBitcodeModule(std::move(*BCBuf), *VMContext);
- if (std::error_code EC = ModuleOrErr.getError()) {
- CI.getDiagnostics().Report(diag::err_cannot_open_file) << LinkBCFile
- << EC.message();
+ Expected<std::unique_ptr<llvm::Module>> ModuleOrErr =
+ getOwningLazyBitcodeModule(std::move(*BCBuf), *VMContext);
+ if (!ModuleOrErr) {
+ handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << EIB.message();
+ });
LinkModules.clear();
return nullptr;
}
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 11e4ad9ecefa..a954f487d1e4 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -38,20 +38,35 @@
using namespace clang;
using namespace CodeGen;
+/// shouldEmitLifetimeMarkers - Decide whether we need to emit lifetime
+/// markers.
+static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
+ const LangOptions &LangOpts) {
+ // Asan uses markers for use-after-scope checks.
+ if (CGOpts.SanitizeAddressUseAfterScope)
+ return true;
+
+ // Disable lifetime markers in msan builds.
+ // FIXME: Remove this when msan works with lifetime markers.
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory))
+ return false;
+
+ // For now, only in optimized builds.
+ return CGOpts.OptimizationLevel != 0;
+}
+
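Summarizing the helper just added, as a sketch (the driver flag spellings are assumptions, not shown in this diff):

// -fsanitize-address-use-after-scope -> markers on, at any -O level
// -fsanitize=memory                  -> markers off (see the FIXME above)
// otherwise                          -> markers only at -O1 and higher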
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
: CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
CGBuilderInserterTy(this)),
CurFn(nullptr), ReturnValue(Address::invalid()),
- CapturedStmtInfo(nullptr),
- SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
- CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
- IsOutlinedSEHHelper(false),
- BlockInfo(nullptr), BlockPointer(nullptr),
- LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
- NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
- ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
- DebugInfo(CGM.getModuleDebugInfo()),
+ CapturedStmtInfo(nullptr), SanOpts(CGM.getLangOpts().Sanitize),
+ IsSanitizerScope(false), CurFuncIsThunk(false), AutoreleaseResult(false),
+ SawAsmBlock(false), IsOutlinedSEHHelper(false), BlockInfo(nullptr),
+ BlockPointer(nullptr), LambdaThisCaptureField(nullptr),
+ NormalCleanupDest(nullptr), NextCleanupDestIndex(1),
+ FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
+ EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
@@ -60,7 +75,9 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
CXXStructorImplicitParamDecl(nullptr),
CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
- TerminateHandler(nullptr), TrapBB(nullptr) {
+ TerminateHandler(nullptr), TrapBB(nullptr),
+ ShouldEmitLifetimeMarkers(
+ shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
@@ -429,12 +446,24 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
EmitNounwindRuntimeCall(F, args);
}
-void CodeGenFunction::EmitMCountInstrumentation() {
- llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
-
- llvm::Constant *MCountFn =
- CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
- EmitNounwindRuntimeCall(MCountFn);
+static void removeImageAccessQualifier(std::string& TyName) {
+ std::string ReadOnlyQual("__read_only");
+ std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
+ if (ReadOnlyPos != std::string::npos)
+ // "+ 1" for the space after access qualifier.
+ TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
+ else {
+ std::string WriteOnlyQual("__write_only");
+ std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
+ if (WriteOnlyPos != std::string::npos)
+ TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
+ else {
+ std::string ReadWriteQual("__read_write");
+ std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
+ if (ReadWritePos != std::string::npos)
+ TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
+ }
+ }
}
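The nested if/else chain above can be written more compactly; a sketch of an equivalent loop (stripAccessQualifier is a hypothetical name), assuming at most one access qualifier occurs in the printed type name:

#include <cstring>
#include <string>

// Strip the first matching OpenCL image access qualifier, plus the
// space that follows it, from a printed type name.
static void stripAccessQualifier(std::string &TyName) {
  for (const char *Qual : {"__read_only", "__write_only", "__read_write"}) {
    std::string::size_type Pos = TyName.find(Qual);
    if (Pos != std::string::npos) {
      TyName.erase(Pos, std::strlen(Qual) + 1); // "+ 1" for the space
      return;
    }
  }
}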
// Returns the address space id that should be produced to the
@@ -549,8 +578,6 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
if (ty.isCanonical() && pos != std::string::npos)
typeName.erase(pos+1, 8);
- argTypeNames.push_back(llvm::MDString::get(Context, typeName));
-
std::string baseTypeName;
if (isPipe)
baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
@@ -560,6 +587,17 @@ static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
baseTypeName =
ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
+ // Remove access qualifiers on images
+ // (they are inseparable from the type in the clang implementation,
+ // but the OpenCL spec provides a dedicated query for the access qualifier
+ // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
+ if (ty->isImageType()) {
+ removeImageAccessQualifier(typeName);
+ removeImageAccessQualifier(baseTypeName);
+ }
+
+ argTypeNames.push_back(llvm::MDString::get(Context, typeName));
+
// Turn "unsigned type" to "utype"
pos = baseTypeName.find("unsigned");
if (pos != std::string::npos)
@@ -709,6 +747,20 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (SanOpts.has(SanitizerKind::SafeStack))
Fn->addFnAttr(llvm::Attribute::SafeStack);
+ // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
+ // .cxx_destruct and all of their callees at run time.
+ if (SanOpts.has(SanitizerKind::Thread)) {
+ if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
+ IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
+ if (OMD->getMethodFamily() == OMF_dealloc ||
+ OMD->getMethodFamily() == OMF_initialize ||
+ (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
+ Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
+ Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
+ }
+ }
+ }
+
// Apply xray attributes to the function (as a string, for now)
if (D && ShouldXRayInstrumentFunction()) {
if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
@@ -723,27 +775,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
}
}
- // Pass inline keyword to optimizer if it appears explicitly on any
- // declaration. Also, in the case of -fno-inline attach NoInline
- // attribute to all functions that are not marked AlwaysInline, or
- // to all functions that are not marked inline or implicitly inline
- // in the case of -finline-hint-functions.
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- const CodeGenOptions& CodeGenOpts = CGM.getCodeGenOpts();
- if (!CodeGenOpts.NoInline) {
- for (auto RI : FD->redecls())
- if (RI->isInlineSpecified()) {
- Fn->addFnAttr(llvm::Attribute::InlineHint);
- break;
- }
- if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyHintInlining &&
- !FD->isInlined() && !Fn->hasFnAttribute(llvm::Attribute::InlineHint))
- Fn->addFnAttr(llvm::Attribute::NoInline);
- } else if (!FD->hasAttr<AlwaysInlineAttr>())
- Fn->addFnAttr(llvm::Attribute::NoInline);
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
if (CGM.getLangOpts().OpenMP && FD->hasAttr<OMPDeclareSimdDeclAttr>())
CGM.getOpenMPRuntime().emitDeclareSimdFunction(FD, Fn);
- }
// Add no-jump-tables value.
Fn->addFnAttr("no-jump-tables",
@@ -778,7 +812,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
if (FD->isMain())
Fn->addFnAttr(llvm::Attribute::NoRecurse);
-
+
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
@@ -811,8 +845,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD,
if (ShouldInstrumentFunction())
EmitFunctionInstrumentation("__cyg_profile_func_enter");
+ // Since emitting the mcount call here impacts optimizations such as function
+ // inlining, we just add an attribute to insert an mcount call in the backend.
+ // The "counting-function" attribute is set to the mcount function name,
+ // which is architecture-dependent.
if (CGM.getCodeGenOpts().InstrumentForProfiling)
- EmitMCountInstrumentation();
+ Fn->addFnAttr("counting-function", getTarget().getMCountName());
if (RetTy->isVoidType()) {
// Void type; nothing to return.
@@ -1040,6 +1078,13 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
if (SpecDecl->hasBody(SpecDecl))
Loc = SpecDecl->getLocation();
+ Stmt *Body = FD->getBody();
+
+ // Initialize the helper that detects jumps which can invalidate lifetime
+ // markers.
+ if (Body && ShouldEmitLifetimeMarkers)
+ Bypasses.Init(Body);
+
// Emit the standard function prologue.
StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
@@ -1069,7 +1114,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// Implicit copy-assignment gets the same special treatment as implicit
// copy-constructors.
emitImplicitAssignmentOperatorBody(Args);
- } else if (Stmt *Body = FD->getBody()) {
+ } else if (Body) {
EmitFunctionBody(Args, Body);
} else
llvm_unreachable("no definition for emitted function");
@@ -1086,8 +1131,8 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
SanitizerScope SanScope(this);
llvm::Value *IsFalse = Builder.getFalse();
EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
- "missing_return", EmitCheckSourceLocation(FD->getLocation()),
- None);
+ SanitizerHandler::MissingReturn,
+ EmitCheckSourceLocation(FD->getLocation()), None);
} else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
EmitTrapCall(llvm::Intrinsic::trap);
}
@@ -1731,6 +1776,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::Enum:
case Type::Elaborated:
case Type::TemplateSpecialization:
+ case Type::ObjCTypeParam:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
@@ -1794,7 +1840,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
};
EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
SanitizerKind::VLABound),
- "vla_bound_not_positive", StaticArgs, Size);
+ SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
}
// Always zexting here would be wrong if it weren't
@@ -1854,8 +1900,8 @@ Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
}
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
- llvm::Constant *Init) {
- assert (Init && "Invalid DeclRefExpr initializer!");
+ const APValue &Init) {
+ assert(!Init.isUninit() && "Invalid DeclRefExpr initializer!");
if (CGDebugInfo *Dbg = getDebugInfo())
if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
Dbg->EmitGlobalVariable(E->getDecl(), Init);
@@ -2046,3 +2092,10 @@ void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
CGM.getSanStats().create(IRB, SSK);
}
+
+llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ return DI->SourceLocToDebugLoc(Location);
+
+ return llvm::DebugLoc();
+}
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index fb19a2657c9c..222d0e97968a 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -21,6 +21,7 @@
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "EHScopeStack.h"
+#include "VarBypassDetector.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -77,6 +78,7 @@ class ObjCAutoreleasePoolStmt;
namespace CodeGen {
class CodeGenTypes;
+class CGCallee;
class CGFunctionInfo;
class CGRecordLayout;
class CGBlockInfo;
@@ -88,6 +90,7 @@ class BlockFieldFlags;
class RegionCodeGenTy;
class TargetCodeGenInfo;
struct OMPTaskDataTy;
+struct CGCoroData;
/// The kind of evaluation to perform on values of a particular
/// type. Basically, is the code in CGExprScalar, CGExprComplex, or
@@ -100,6 +103,32 @@ enum TypeEvaluationKind {
TEK_Aggregate
};
+#define LIST_SANITIZER_CHECKS \
+ SANITIZER_CHECK(AddOverflow, add_overflow, 0) \
+ SANITIZER_CHECK(BuiltinUnreachable, builtin_unreachable, 0) \
+ SANITIZER_CHECK(CFICheckFail, cfi_check_fail, 0) \
+ SANITIZER_CHECK(DivremOverflow, divrem_overflow, 0) \
+ SANITIZER_CHECK(DynamicTypeCacheMiss, dynamic_type_cache_miss, 0) \
+ SANITIZER_CHECK(FloatCastOverflow, float_cast_overflow, 0) \
+ SANITIZER_CHECK(FunctionTypeMismatch, function_type_mismatch, 0) \
+ SANITIZER_CHECK(LoadInvalidValue, load_invalid_value, 0) \
+ SANITIZER_CHECK(MissingReturn, missing_return, 0) \
+ SANITIZER_CHECK(MulOverflow, mul_overflow, 0) \
+ SANITIZER_CHECK(NegateOverflow, negate_overflow, 0) \
+ SANITIZER_CHECK(NonnullArg, nonnull_arg, 0) \
+ SANITIZER_CHECK(NonnullReturn, nonnull_return, 0) \
+ SANITIZER_CHECK(OutOfBounds, out_of_bounds, 0) \
+ SANITIZER_CHECK(ShiftOutOfBounds, shift_out_of_bounds, 0) \
+ SANITIZER_CHECK(SubOverflow, sub_overflow, 0) \
+ SANITIZER_CHECK(TypeMismatch, type_mismatch, 0) \
+ SANITIZER_CHECK(VLABoundNotPositive, vla_bound_not_positive, 0)
+
+enum SanitizerHandler {
+#define SANITIZER_CHECK(Enum, Name, Version) Enum,
+ LIST_SANITIZER_CHECKS
+#undef SANITIZER_CHECK
+};
+
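LIST_SANITIZER_CHECKS is an X-macro: a single master list expanded several times under different SANITIZER_CHECK definitions. A self-contained sketch of the technique (hypothetical names; the __ubsan_handle_ prefix is how UBSan runtime handlers are conventionally named):

// One master list...
#define MY_CHECKS                        \
  CHECK(AddOverflow, add_overflow)       \
  CHECK(MissingReturn, missing_return)

// ...expanded once into an enum...
enum Handler {
#define CHECK(Enum, Name) Enum,
  MY_CHECKS
#undef CHECK
};

// ...and once into a parallel table of runtime handler names.
static const char *HandlerNames[] = {
#define CHECK(Enum, Name) "__ubsan_handle_" #Name,
  MY_CHECKS
#undef CHECK
};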
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public CodeGenTypeCache {
@@ -140,6 +169,10 @@ public:
LoopInfoStack LoopStack;
CGBuilderTy Builder;
+ // Stores variables for which we can't generate correct lifetime markers
+ // because of jumps.
+ VarBypassDetector Bypasses;
+
/// \brief CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
@@ -155,6 +188,16 @@ public:
QualType FnRetTy;
llvm::Function *CurFn;
+ // Holds coroutine data if the current function is a coroutine. We use a
+ // wrapper to manage its lifetime, so that we don't have to define CGCoroData
+ // in this header.
+ struct CGCoroInfo {
+ std::unique_ptr<CGCoroData> Data;
+ CGCoroInfo();
+ ~CGCoroInfo();
+ };
+ CGCoroInfo CurCoro;
+
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
@@ -430,7 +473,7 @@ public:
LifetimeExtendedCleanupStack.resize(
LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size);
- static_assert(sizeof(Header) % llvm::AlignOf<T>::Alignment == 0,
+ static_assert(sizeof(Header) % alignof(T) == 0,
"Cleanup will be allocated on misaligned address");
char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
new (Buffer) LifetimeExtendedCleanupHeader(Header);
@@ -901,6 +944,17 @@ public:
e->getCommon());
}
+ /// Build the opaque value mapping for an OpaqueValueExpr whose source
+ /// expression is set to the expression the OVE represents.
+ OpaqueValueMapping(CodeGenFunction &CGF, const OpaqueValueExpr *OV)
+ : CGF(CGF) {
+ if (OV) {
+ assert(OV->getSourceExpr() && "wrong form of OpaqueValueMapping used "
+ "for OVE with no source expression");
+ Data = OpaqueValueMappingData::bind(CGF, OV, OV->getSourceExpr());
+ }
+ }
+
OpaqueValueMapping(CodeGenFunction &CGF,
const OpaqueValueExpr *opaqueValue,
LValue lvalue)
@@ -1166,6 +1220,23 @@ public:
CharUnits OldCXXThisAlignment;
};
+ /// The scope of an ArrayInitLoopExpr. Within this scope, the value of the
+ /// current loop index is overridden.
+ class ArrayInitLoopExprScope {
+ public:
+ ArrayInitLoopExprScope(CodeGenFunction &CGF, llvm::Value *Index)
+ : CGF(CGF), OldArrayInitIndex(CGF.ArrayInitIndex) {
+ CGF.ArrayInitIndex = Index;
+ }
+ ~ArrayInitLoopExprScope() {
+ CGF.ArrayInitIndex = OldArrayInitIndex;
+ }
+
+ private:
+ CodeGenFunction &CGF;
+ llvm::Value *OldArrayInitIndex;
+ };
+
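ArrayInitLoopExprScope is the standard RAII save/restore idiom (the same shape as llvm::SaveAndRestore); a generic sketch:

// Save the current value on entry, install a new one, and restore the
// old value when the scope exits.
template <typename T> class SaveAndRestoreSketch {
  T &Ref;
  T Old;
public:
  SaveAndRestoreSketch(T &R, T NewVal) : Ref(R), Old(R) { R = NewVal; }
  ~SaveAndRestoreSketch() { Ref = Old; }
};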
class InlinedInheritingConstructorScope {
public:
InlinedInheritingConstructorScope(CodeGenFunction &CGF, GlobalDecl GD)
@@ -1234,6 +1305,10 @@ private:
/// this expression.
Address CXXDefaultInitExprThis = Address::invalid();
+ /// The current array initialization index when evaluating an
+ /// ArrayInitIndexExpr within an ArrayInitLoopExpr.
+ llvm::Value *ArrayInitIndex = nullptr;
+
/// The values of function arguments to use when evaluating
/// CXXInheritedCtorInitExprs within this context.
CallArgList CXXInheritedCtorInitExprArgs;
@@ -1263,6 +1338,9 @@ private:
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
+ /// True if we need to emit lifetime markers.
+ const bool ShouldEmitLifetimeMarkers;
+
/// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
/// In the kernel metadata node, reference the kernel function and metadata
/// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
@@ -1421,7 +1499,6 @@ public:
//===--------------------------------------------------------------------===//
llvm::Value *EmitBlockLiteral(const BlockExpr *);
- llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
static void destroyBlockInfos(CGBlockInfo *info);
llvm::Function *GenerateBlockFunction(GlobalDecl GD,
@@ -1503,7 +1580,8 @@ public:
void StartThunk(llvm::Function *Fn, GlobalDecl GD,
const CGFunctionInfo &FnInfo);
- void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);
+ void EmitCallAndReturnForThunk(llvm::Constant *Callee,
+ const ThunkInfo *Thunk);
void FinishThunk();
@@ -1522,8 +1600,7 @@ public:
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
FunctionArgList &Args);
- void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
- ArrayRef<VarDecl *> ArrayIndexes);
+ void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init);
/// Struct with all the information about a dynamic [sub]class needed to set the vptr.
struct VPtr {
@@ -1932,6 +2009,9 @@ public:
return it->second;
}
+ /// Get the index of the current ArrayInitLoopExpr, if any.
+ llvm::Value *getArrayInitIndex() { return ArrayInitIndex; }
+
/// getAccessedFieldNo - Given an encoded value and a result number, return
/// the input field number being accessed.
static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
@@ -2110,7 +2190,8 @@ public:
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
- QualType DeleteTy);
+ QualType DeleteTy, llvm::Value *NumElements = nullptr,
+ CharUnits CookieSize = CharUnits());
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
const Expr *Arg, bool IsDelete);
@@ -2179,6 +2260,10 @@ public:
OffsetValue);
}
+ /// Converts Location to a DebugLoc, if debug information is enabled.
+ llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Location);
+
+
//===--------------------------------------------------------------------===//
// Declaration Emission
//===--------------------------------------------------------------------===//
@@ -2195,7 +2280,6 @@ public:
void EmitScalarInit(const Expr *init, const ValueDecl *D, LValue lvalue,
bool capturedByInit);
- void EmitScalarInit(llvm::Value *init, LValue lvalue);
typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
llvm::Value *Address);
@@ -2378,6 +2462,9 @@ public:
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
+ void EmitCoroutineBody(const CoroutineBodyStmt &S);
+ RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
+
void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
@@ -2480,6 +2567,9 @@ public:
OMPPrivateScope &PrivateScope);
void EmitOMPPrivateClause(const OMPExecutableDirective &D,
OMPPrivateScope &PrivateScope);
+ void EmitOMPUseDevicePtrClause(
+ const OMPClause &C, OMPPrivateScope &PrivateScope,
+ const llvm::DenseMap<const ValueDecl *, Address> &CaptureDeviceAddrMap);
/// \brief Emit code for copyin clause in \a D directive. The next code is
/// generated at the start of outlined functions for directives:
/// \code
@@ -2596,6 +2686,19 @@ public:
void EmitOMPDistributeSimdDirective(const OMPDistributeSimdDirective &S);
void EmitOMPTargetParallelForSimdDirective(
const OMPTargetParallelForSimdDirective &S);
+ void EmitOMPTargetSimdDirective(const OMPTargetSimdDirective &S);
+ void EmitOMPTeamsDistributeDirective(const OMPTeamsDistributeDirective &S);
+ void
+ EmitOMPTeamsDistributeSimdDirective(const OMPTeamsDistributeSimdDirective &S);
+ void EmitOMPTeamsDistributeParallelForSimdDirective(
+ const OMPTeamsDistributeParallelForSimdDirective &S);
+ void EmitOMPTeamsDistributeParallelForDirective(
+ const OMPTeamsDistributeParallelForDirective &S);
+ void EmitOMPTargetTeamsDirective(const OMPTargetTeamsDirective &S);
+ void EmitOMPTargetTeamsDistributeDirective(
+ const OMPTargetTeamsDistributeDirective &S);
+ void EmitOMPTargetTeamsDistributeParallelForDirective(
+ const OMPTargetTeamsDistributeParallelForDirective &S);
/// Emit outlined function for the target directive.
static std::pair<llvm::Function * /*OutlinedFn*/,
@@ -2626,6 +2729,9 @@ public:
OMPPrivateScope &LoopScope);
private:
+ /// Helpers for blocks
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
+
/// Helpers for the OpenMP loop directives.
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
@@ -2893,7 +2999,7 @@ public:
LValue EmitStmtExprLValue(const StmtExpr *E);
LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
- void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, const APValue &Init);
//===--------------------------------------------------------------------===//
// Scalar Expression Emission
@@ -2902,17 +3008,17 @@ public:
/// EmitCall - Generate a call of the given function, expecting the given
/// result type, and using the given argument list which specifies both the
/// LLVM arguments and the types they were derived from.
- RValue EmitCall(const CGFunctionInfo &FnInfo, llvm::Value *Callee,
+ RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- CGCalleeInfo CalleeInfo = CGCalleeInfo(),
llvm::Instruction **callOrInvoke = nullptr);
- RValue EmitCall(QualType FnType, llvm::Value *Callee, const CallExpr *E,
+ RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
ReturnValueSlot ReturnValue,
- CGCalleeInfo CalleeInfo = CGCalleeInfo(),
llvm::Value *Chain = nullptr);
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
+ RValue EmitSimpleCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
+ CGCallee EmitCallee(const Expr *E);
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl);
@@ -2938,20 +3044,23 @@ public:
void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
ArrayRef<llvm::Value*> args);
- llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual,
- llvm::Type *Ty);
+ CGCallee BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty);
- llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
- CXXDtorType Type,
- const CXXRecordDecl *RD);
+ CGCallee BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD);
RValue
- EmitCXXMemberOrOperatorCall(const CXXMethodDecl *MD, llvm::Value *Callee,
+ EmitCXXMemberOrOperatorCall(const CXXMethodDecl *Method,
+ const CGCallee &Callee,
ReturnValueSlot ReturnValue, llvm::Value *This,
llvm::Value *ImplicitParam,
- QualType ImplicitParamTy, const CallExpr *E);
- RValue EmitCXXDestructorCall(const CXXDestructorDecl *DD, llvm::Value *Callee,
+ QualType ImplicitParamTy, const CallExpr *E,
+ CallArgList *RtlArgs);
+ RValue EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+ const CGCallee &Callee,
llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *E,
StructorType Type);
@@ -2974,6 +3083,7 @@ public:
RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
ReturnValueSlot ReturnValue);
+ RValue EmitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E);
RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
ReturnValueSlot ReturnValue);
@@ -3029,6 +3139,12 @@ public:
llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E);
+private:
+ enum class MSVCIntrin;
+
+public:
+ llvm::Value *EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E);
+
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
@@ -3292,7 +3408,7 @@ public:
/// sanitizer runtime with the provided arguments, and create a conditional
/// branch to it.
void EmitCheck(ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
- StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ SanitizerHandler Check, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs);
/// \brief Emit a slow path cross-DSO CFI check which calls __cfi_slowpath
@@ -3400,12 +3516,22 @@ public:
static bool isObjCMethodWithTypeParams(const T *) { return false; }
#endif
+ enum class EvaluationOrder {
+ ///! No language constraints on evaluation order.
+ Default,
+ ///! Language semantics require left-to-right evaluation.
+ ForceLeftToRight,
+ ///! Language semantics require right-to-left evaluation.
+ ForceRightToLeft
+ };
+
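A usage note, offered as an assumption about call sites rather than anything shown in this diff:

// Hypothetical callers: Microsoft ABIs historically evaluate arguments
// right to left, and some C++17 sequencing rules pin a specific order,
// so a caller would pass ForceRightToLeft or ForceLeftToRight where the
// ABI or language requires it, and Default everywhere else.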
/// EmitCallArgs - Emit call arguments for a function.
template <typename T>
void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
const FunctionDecl *CalleeDecl = nullptr,
- unsigned ParamsToSkip = 0) {
+ unsigned ParamsToSkip = 0,
+ EvaluationOrder Order = EvaluationOrder::Default) {
SmallVector<QualType, 16> ArgTypes;
CallExpr::const_arg_iterator Arg = ArgRange.begin();
@@ -3445,13 +3571,14 @@ public:
for (auto *A : llvm::make_range(Arg, ArgRange.end()))
ArgTypes.push_back(getVarArgType(A));
- EmitCallArgs(Args, ArgTypes, ArgRange, CalleeDecl, ParamsToSkip);
+ EmitCallArgs(Args, ArgTypes, ArgRange, CalleeDecl, ParamsToSkip, Order);
}
void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
const FunctionDecl *CalleeDecl = nullptr,
- unsigned ParamsToSkip = 0);
+ unsigned ParamsToSkip = 0,
+ EvaluationOrder Order = EvaluationOrder::Default);
/// EmitPointerWithAlignment - Given an expression with a pointer
/// type, emit the value and compute our best estimate of the
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 0161cfb611ca..ab29d2dbb566 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -24,6 +24,7 @@
#include "CodeGenFunction.h"
#include "CodeGenPGO.h"
#include "CodeGenTBAA.h"
+#include "ConstantBuilder.h"
#include "CoverageMappingGen.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
@@ -43,7 +44,6 @@
#include "clang/Basic/Version.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Sema/SemaDiagnostic.h"
-#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
@@ -102,10 +102,13 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
PointerAlignInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ SizeSizeInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getMaxPointerWidth()).getQuantity();
IntAlignInBytes =
C.toCharUnitsFromBits(C.getTargetInfo().getIntAlign()).getQuantity();
IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
- IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ IntPtrTy = llvm::IntegerType::get(LLVMContext,
+ C.getTargetInfo().getMaxPointerWidth());
Int8PtrTy = Int8Ty->getPointerTo(0);
Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
@@ -187,8 +190,7 @@ void CodeGenModule::createOpenCLRuntime() {
void CodeGenModule::createOpenMPRuntime() {
// Select a specialized code generation class based on the target, if any.
// If it does not exist use the default implementation.
- switch (getTarget().getTriple().getArch()) {
-
+ switch (getTriple().getArch()) {
case llvm::Triple::nvptx:
case llvm::Triple::nvptx64:
assert(getLangOpts().OpenMPIsDevice &&
@@ -469,7 +471,7 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Override, "Cross-DSO CFI", 1);
}
- if (LangOpts.CUDAIsDevice && getTarget().getTriple().isNVPTX()) {
+ if (LangOpts.CUDAIsDevice && getTriple().isNVPTX()) {
// Indicate whether __nvvm_reflect should be configured to flush denormal
// floating point values to 0. (This corresponds to its "__CUDA_FTZ"
// property.)
@@ -672,7 +674,16 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
} else {
IdentifierInfo *II = ND->getIdentifier();
assert(II && "Attempt to mangle unnamed decl.");
- Str = II->getName();
+ const auto *FD = dyn_cast<FunctionDecl>(ND);
+
+ if (FD &&
+ FD->getType()->castAs<FunctionType>()->getCallConv() == CC_X86RegCall) {
+ llvm::raw_svector_ostream Out(Buffer);
+ Out << "__regcall3__" << II->getName();
+ Str = Out.str();
+ } else {
+ Str = II->getName();
+ }
}
// Keep the first result in the case of a mangling collision.
@@ -720,7 +731,9 @@ void CodeGenModule::AddGlobalDtor(llvm::Function *Dtor, int Priority) {
GlobalDtors.push_back(Structor(Priority, Dtor, nullptr));
}
-void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+void CodeGenModule::EmitCtorList(CtorList &Fns, const char *GlobalName) {
+ if (Fns.empty()) return;
+
// Ctor function type is void()*.
llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
@@ -730,24 +743,29 @@ void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
Int32Ty, llvm::PointerType::getUnqual(CtorFTy), VoidPtrTy, nullptr);
// Construct the constructor and destructor arrays.
- SmallVector<llvm::Constant *, 8> Ctors;
+ ConstantInitBuilder builder(*this);
+ auto ctors = builder.beginArray(CtorStructTy);
for (const auto &I : Fns) {
- llvm::Constant *S[] = {
- llvm::ConstantInt::get(Int32Ty, I.Priority, false),
- llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy),
- (I.AssociatedData
- ? llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy)
- : llvm::Constant::getNullValue(VoidPtrTy))};
- Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ auto ctor = ctors.beginStruct(CtorStructTy);
+ ctor.addInt(Int32Ty, I.Priority);
+ ctor.add(llvm::ConstantExpr::getBitCast(I.Initializer, CtorPFTy));
+ if (I.AssociatedData)
+ ctor.add(llvm::ConstantExpr::getBitCast(I.AssociatedData, VoidPtrTy));
+ else
+ ctor.addNullPointer(VoidPtrTy);
+ ctor.finishAndAddTo(ctors);
}
- if (!Ctors.empty()) {
- llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
- new llvm::GlobalVariable(TheModule, AT, false,
- llvm::GlobalValue::AppendingLinkage,
- llvm::ConstantArray::get(AT, Ctors),
- GlobalName);
- }
+ auto list =
+ ctors.finishAndCreateGlobal(GlobalName, getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::AppendingLinkage);
+
+ // The LTO linker doesn't seem to like it when we set an alignment
+ // on appending variables. Take it off as a workaround.
+ list->setAlignment(0);
+
+ Fns.clear();
}
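For reference, the shape of each element the builder now emits into the ctor/dtor list globals, sketched in plain C++ terms (the real entries are LLVM constant structs):

#include <cstdint>

// { i32 priority, void()* initializer, i8* associated-data-or-null }
struct CtorEntry {
  int32_t Priority;
  void (*Initializer)();
  void *AssociatedData; // null when there is no associated data
};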
llvm::GlobalValue::LinkageTypes
@@ -800,14 +818,7 @@ llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
llvm::MDString *MDS = dyn_cast<llvm::MDString>(MD);
if (!MDS) return nullptr;
- llvm::MD5 md5;
- llvm::MD5::MD5Result result;
- md5.update(MDS->getString());
- md5.final(result);
- uint64_t id = 0;
- for (int i = 0; i < 8; ++i)
- id |= static_cast<uint64_t>(result[i]) << (i * 8);
- return llvm::ConstantInt::get(Int64Ty, id);
+ return llvm::ConstantInt::get(Int64Ty, llvm::MD5Hash(MDS->getString()));
}
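The removed loop is now a single llvm::MD5Hash call; what it computed, as a standalone sketch:

#include <cstdint>

// Fold the first 8 bytes of an MD5 digest into a little-endian uint64_t.
static uint64_t lowBitsOfMD5(const uint8_t Digest[16]) {
  uint64_t Id = 0;
  for (int I = 0; I < 8; ++I)
    Id |= static_cast<uint64_t>(Digest[I]) << (I * 8);
  return Id;
}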
void CodeGenModule::setFunctionDefinitionAttributes(const FunctionDecl *D,
@@ -864,6 +875,13 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
B.addAttribute(llvm::Attribute::StackProtectReq);
if (!D) {
+ // If we don't have a declaration to control inlining, the function isn't
+ // explicitly marked as alwaysinline for semantic reasons, and inlining is
+ // disabled, mark the function as noinline.
+ if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
+ CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining)
+ B.addAttribute(llvm::Attribute::NoInline);
+
F->addAttributes(llvm::AttributeSet::FunctionIndex,
llvm::AttributeSet::get(
F->getContext(),
@@ -871,7 +889,23 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
return;
}
- if (D->hasAttr<NakedAttr>()) {
+ if (D->hasAttr<OptimizeNoneAttr>()) {
+ B.addAttribute(llvm::Attribute::OptimizeNone);
+
+ // OptimizeNone implies noinline; we should not be inlining such functions.
+ B.addAttribute(llvm::Attribute::NoInline);
+ assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
+ "OptimizeNone and AlwaysInline on same function!");
+
+ // We still need to handle naked functions even though optnone subsumes
+ // much of their semantics.
+ if (D->hasAttr<NakedAttr>())
+ B.addAttribute(llvm::Attribute::Naked);
+
+ // OptimizeNone wins over OptimizeForSize and MinSize.
+ F->removeFnAttr(llvm::Attribute::OptimizeForSize);
+ F->removeFnAttr(llvm::Attribute::MinSize);
+ } else if (D->hasAttr<NakedAttr>()) {
// Naked implies noinline: we should not be inlining such functions.
B.addAttribute(llvm::Attribute::Naked);
B.addAttribute(llvm::Attribute::NoInline);
@@ -880,41 +914,47 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
} else if (D->hasAttr<NoInlineAttr>()) {
B.addAttribute(llvm::Attribute::NoInline);
} else if (D->hasAttr<AlwaysInlineAttr>() &&
- !F->getAttributes().hasAttribute(llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoInline)) {
+ !F->hasFnAttribute(llvm::Attribute::NoInline)) {
// (noinline wins over always_inline, and we can't specify both in IR)
B.addAttribute(llvm::Attribute::AlwaysInline);
+ } else if (CodeGenOpts.getInlining() == CodeGenOptions::OnlyAlwaysInlining) {
+ // If we're not inlining, then force everything that isn't always_inline to
+ // carry an explicit noinline attribute.
+ if (!F->hasFnAttribute(llvm::Attribute::AlwaysInline))
+ B.addAttribute(llvm::Attribute::NoInline);
+ } else {
+ // Otherwise, propagate the inline hint attribute and potentially use its
+ // absence to mark things as noinline.
+ if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (any_of(FD->redecls(), [&](const FunctionDecl *Redecl) {
+ return Redecl->isInlineSpecified();
+ })) {
+ B.addAttribute(llvm::Attribute::InlineHint);
+ } else if (CodeGenOpts.getInlining() ==
+ CodeGenOptions::OnlyHintInlining &&
+ !FD->isInlined() &&
+ !F->hasFnAttribute(llvm::Attribute::AlwaysInline)) {
+ B.addAttribute(llvm::Attribute::NoInline);
+ }
+ }
}
- if (D->hasAttr<ColdAttr>()) {
- if (!D->hasAttr<OptimizeNoneAttr>())
+ // Add other optimization related attributes if we are optimizing this
+ // function.
+ if (!D->hasAttr<OptimizeNoneAttr>()) {
+ if (D->hasAttr<ColdAttr>()) {
B.addAttribute(llvm::Attribute::OptimizeForSize);
- B.addAttribute(llvm::Attribute::Cold);
- }
+ B.addAttribute(llvm::Attribute::Cold);
+ }
- if (D->hasAttr<MinSizeAttr>())
- B.addAttribute(llvm::Attribute::MinSize);
+ if (D->hasAttr<MinSizeAttr>())
+ B.addAttribute(llvm::Attribute::MinSize);
+ }
F->addAttributes(llvm::AttributeSet::FunctionIndex,
llvm::AttributeSet::get(
F->getContext(), llvm::AttributeSet::FunctionIndex, B));
- if (D->hasAttr<OptimizeNoneAttr>()) {
- // OptimizeNone implies noinline; we should not be inlining such functions.
- F->addFnAttr(llvm::Attribute::OptimizeNone);
- F->addFnAttr(llvm::Attribute::NoInline);
-
- // OptimizeNone wins over OptimizeForSize, MinSize, AlwaysInline.
- F->removeFnAttr(llvm::Attribute::OptimizeForSize);
- F->removeFnAttr(llvm::Attribute::MinSize);
- assert(!F->hasFnAttribute(llvm::Attribute::AlwaysInline) &&
- "OptimizeNone and AlwaysInline on same function!");
-
- // Attribute 'inlinehint' has no effect on 'optnone' functions.
- // Explicitly remove it from the set of function attributes.
- F->removeFnAttr(llvm::Attribute::InlineHint);
- }
-
unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
if (alignment)
F->setAlignment(alignment);
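The restructured chain above resolves the inlining attributes with a fixed precedence; a condensed sketch (plain enum, hypothetical names):

enum class InlineChoice { NoInline, AlwaysInline, InlineHint, None };

// optnone/naked/noinline beat always_inline, which beats the
// -fno-inline-style modes, which beat the plain `inline` hint.
InlineChoice pickInline(bool OptNone, bool Naked, bool NoInlineAttr,
                        bool AlwaysInlineAttr, bool OnlyAlwaysInlining,
                        bool OnlyHintInlining, bool InlineSpecified) {
  if (OptNone || Naked || NoInlineAttr)
    return InlineChoice::NoInline;
  if (AlwaysInlineAttr)
    return InlineChoice::AlwaysInline;
  if (OnlyAlwaysInlining)
    return InlineChoice::NoInline;
  if (InlineSpecified)
    return InlineChoice::InlineHint;
  if (OnlyHintInlining)
    return InlineChoice::NoInline;
  return InlineChoice::None;
}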
@@ -927,6 +967,11 @@ void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
F->setAlignment(2);
}
+
+ // In the cross-dso CFI mode, we want !type attributes on definitions only.
+ if (CodeGenOpts.SanitizeCfiCrossDso)
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ CreateFunctionTypeMetadata(FD, F);
}
void CodeGenModule::SetCommonAttributes(const Decl *D,
@@ -1009,10 +1054,6 @@ void CodeGenModule::CreateFunctionTypeMetadata(const FunctionDecl *FD,
// Additionally, if building with cross-DSO support...
if (CodeGenOpts.SanitizeCfiCrossDso) {
- // Don't emit entries for function declarations. In cross-DSO mode these are
- // handled with better precision at run time.
- if (!FD->hasBody())
- return;
// Skip available_externally functions. They won't be codegen'ed in the
// current module anyway.
if (getContext().GetGVALinkageForFunction(FD) == GVA_AvailableExternally)
@@ -1047,8 +1088,7 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
// where substantial code, including the libstdc++ dylib, was compiled with
// GCC and does not actually return "this".
if (!IsThunk && getCXXABI().HasThisReturn(GD) &&
- !(getTarget().getTriple().isiOS() &&
- getTarget().getTriple().isOSVersionLT(6))) {
+ !(getTriple().isiOS() && getTriple().isOSVersionLT(6))) {
assert(!F->arg_empty() &&
F->arg_begin()->getType()
->canLosslesslyBitCastTo(F->getReturnType()) &&
@@ -1086,7 +1126,10 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
if (MD->isVirtual())
F->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- CreateFunctionTypeMetadata(FD, F);
+ // Don't emit entries for function declarations in the cross-DSO mode. This
+ // is handled with better precision by the receiving DSO.
+ if (!CodeGenOpts.SanitizeCfiCrossDso)
+ CreateFunctionTypeMetadata(FD, F);
}
void CodeGenModule::addUsedGlobal(llvm::GlobalValue *GV) {
@@ -1282,7 +1325,7 @@ void CodeGenModule::EmitDeferred() {
// might had been created for another decl with the same mangled name but
// different type.
llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(
- GetAddrOfGlobal(D, /*IsForDefinition=*/true));
+ GetAddrOfGlobal(D, ForDefinition));
// In case of different address spaces, we may still get a cast, even with
// IsForDefinition equal to true. Query mangled names table to get
@@ -1681,6 +1724,8 @@ namespace {
: public RecursiveASTVisitor<DLLImportFunctionVisitor> {
bool SafeToInline = true;
+ bool shouldVisitImplicitCode() const { return true; }
+
bool VisitVarDecl(VarDecl *VD) {
// A thread-local variable cannot be imported.
SafeToInline = !VD->getTLSKind();
@@ -1696,6 +1741,10 @@ namespace {
SafeToInline = !V->hasGlobalStorage() || V->hasAttr<DLLImportAttr>();
return SafeToInline;
}
+ bool VisitCXXConstructExpr(CXXConstructExpr *E) {
+ SafeToInline = E->getConstructor()->hasAttr<DLLImportAttr>();
+ return SafeToInline;
+ }
bool VisitCXXDeleteExpr(CXXDeleteExpr *E) {
SafeToInline = E->getOperatorDelete()->hasAttr<DLLImportAttr>();
return SafeToInline;
@@ -1728,8 +1777,17 @@ CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
return Walker.Result;
}
-bool
-CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
+// Check if T is a class type with a destructor that's not dllimport.
+static bool HasNonDllImportDtor(QualType T) {
+ if (const RecordType *RT = dyn_cast<RecordType>(T))
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (RD->getDestructor() && !RD->getDestructor()->hasAttr<DLLImportAttr>())
+ return true;
+
+ return false;
+}
+
+bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
if (getFunctionLinkage(GD) != llvm::Function::AvailableExternallyLinkage)
return true;
const auto *F = cast<FunctionDecl>(GD.getDecl());
@@ -1742,6 +1800,18 @@ CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
if (!Visitor.SafeToInline)
return false;
+
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(F)) {
+ // Implicit destructor invocations aren't captured in the AST, so the
+ // check above can't see them. Check for them manually here.
+ for (const Decl *Member : Dtor->getParent()->decls())
+ if (isa<FieldDecl>(Member))
+ if (HasNonDllImportDtor(cast<FieldDecl>(Member)->getType()))
+ return false;
+ for (const CXXBaseSpecifier &B : Dtor->getParent()->bases())
+ if (HasNonDllImportDtor(B.getType()))
+ return false;
+ }
}
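An illustrative trigger for the new manual check (a sketch with MSVC-style attributes assumed): the implicit member-destructor call never appears in the AST body, yet it references a destructor that is not dllimport.

struct Local { ~Local(); };               // destructor is not dllimport
struct __declspec(dllimport) Imported {
  Local L;     // ~Imported() implicitly calls ~Local()
  ~Imported(); // so it is unsafe to emit available_externally
};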
// PR9614. Avoid cases where the source code is lying to us. An available
@@ -1823,7 +1893,7 @@ CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
GlobalDecl GD, bool ForVTable,
bool DontDefer, bool IsThunk,
llvm::AttributeSet ExtraAttrs,
- bool IsForDefinition) {
+ ForDefinition_t IsForDefinition) {
const Decl *D = GD.getDecl();
// Lookup the entry, lazily creating it if necessary.
@@ -1983,7 +2053,7 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
llvm::Type *Ty,
bool ForVTable,
bool DontDefer,
- bool IsForDefinition) {
+ ForDefinition_t IsForDefinition) {
// If there was no specific requested type, just convert it now.
if (!Ty) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
@@ -1997,18 +2067,70 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
IsForDefinition);
}
+static const FunctionDecl *
+GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
+ TranslationUnitDecl *TUDecl = C.getTranslationUnitDecl();
+ DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
+
+ IdentifierInfo &CII = C.Idents.get(Name);
+ for (const auto &Result : DC->lookup(&CII))
+ if (const auto *FD = dyn_cast<FunctionDecl>(Result))
+ return FD;
+
+ if (!C.getLangOpts().CPlusPlus)
+ return nullptr;
+
+ // Demangle the premangled name from getTerminateFn()
+ IdentifierInfo &CXXII =
+ (Name == "_ZSt9terminatev" || Name == "\01?terminate@@YAXXZ")
+ ? C.Idents.get("terminate")
+ : C.Idents.get(Name);
+
+ for (const auto &N : {"__cxxabiv1", "std"}) {
+ IdentifierInfo &NS = C.Idents.get(N);
+ for (const auto &Result : DC->lookup(&NS)) {
+ NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
+ if (auto LSD = dyn_cast<LinkageSpecDecl>(Result))
+ for (const auto &Result : LSD->lookup(&NS))
+ if ((ND = dyn_cast<NamespaceDecl>(Result)))
+ break;
+
+ if (ND)
+ for (const auto &Result : ND->lookup(&CXXII))
+ if (const auto *FD = dyn_cast<FunctionDecl>(Result))
+ return FD;
+ }
+ }
+
+ return nullptr;
+}
+
/// CreateRuntimeFunction - Create a new runtime function with the specified
/// type and name.
llvm::Constant *
-CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
- StringRef Name,
- llvm::AttributeSet ExtraAttrs) {
+CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy, StringRef Name,
+ llvm::AttributeSet ExtraAttrs,
+ bool Local) {
llvm::Constant *C =
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
- /*DontDefer=*/false, /*IsThunk=*/false, ExtraAttrs);
- if (auto *F = dyn_cast<llvm::Function>(C))
- if (F->empty())
+ /*DontDefer=*/false, /*IsThunk=*/false,
+ ExtraAttrs);
+
+ if (auto *F = dyn_cast<llvm::Function>(C)) {
+ if (F->empty()) {
F->setCallingConv(getRuntimeCC());
+
+ if (!Local && getTriple().isOSBinFormatCOFF() &&
+ !getCodeGenOpts().LTOVisibilityPublicStd) {
+ const FunctionDecl *FD = GetRuntimeFunctionDecl(Context, Name);
+ if (!FD || FD->hasAttr<DLLImportAttr>()) {
+ F->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ F->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+ }
+ }
+ }
+
return C;
}
@@ -2062,7 +2184,7 @@ llvm::Constant *
CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *Ty,
const VarDecl *D,
- bool IsForDefinition) {
+ ForDefinition_t IsForDefinition) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
@@ -2162,7 +2284,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
}
// Handle XCore specific ABI requirements.
- if (getTarget().getTriple().getArch() == llvm::Triple::xcore &&
+ if (getTriple().getArch() == llvm::Triple::xcore &&
D->getLanguageLinkage() == CLanguageLinkage &&
D->getType().isConstant(Context) &&
isExternallyVisible(D->getLinkageAndVisibility().getLinkage()))
@@ -2177,30 +2299,31 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::Constant *
CodeGenModule::GetAddrOfGlobal(GlobalDecl GD,
- bool IsForDefinition) {
- if (isa<CXXConstructorDecl>(GD.getDecl()))
- return getAddrOfCXXStructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ ForDefinition_t IsForDefinition) {
+ const Decl *D = GD.getDecl();
+ if (isa<CXXConstructorDecl>(D))
+ return getAddrOfCXXStructor(cast<CXXConstructorDecl>(D),
getFromCtorType(GD.getCtorType()),
/*FnInfo=*/nullptr, /*FnType=*/nullptr,
/*DontDefer=*/false, IsForDefinition);
- else if (isa<CXXDestructorDecl>(GD.getDecl()))
- return getAddrOfCXXStructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ else if (isa<CXXDestructorDecl>(D))
+ return getAddrOfCXXStructor(cast<CXXDestructorDecl>(D),
getFromDtorType(GD.getDtorType()),
/*FnInfo=*/nullptr, /*FnType=*/nullptr,
/*DontDefer=*/false, IsForDefinition);
- else if (isa<CXXMethodDecl>(GD.getDecl())) {
+ else if (isa<CXXMethodDecl>(D)) {
auto FInfo = &getTypes().arrangeCXXMethodDeclaration(
- cast<CXXMethodDecl>(GD.getDecl()));
+ cast<CXXMethodDecl>(D));
auto Ty = getTypes().GetFunctionType(*FInfo);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
- } else if (isa<FunctionDecl>(GD.getDecl())) {
+ } else if (isa<FunctionDecl>(D)) {
const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
return GetAddrOfFunction(GD, Ty, /*ForVTable=*/false, /*DontDefer=*/false,
IsForDefinition);
} else
- return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()), /*Ty=*/nullptr,
+ return GetAddrOfGlobalVar(cast<VarDecl>(D), /*Ty=*/nullptr,
IsForDefinition);
}
@@ -2254,7 +2377,7 @@ CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
/// variable with the same mangled name but some other type.
llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Type *Ty,
- bool IsForDefinition) {
+ ForDefinition_t IsForDefinition) {
assert(D->hasGlobalStorage() && "Not a global variable");
QualType ASTTy = D->getType();
if (!Ty)
@@ -2385,8 +2508,13 @@ void CodeGenModule::maybeSetTrivialComdat(const Decl &D,
/// Pass IsTentative as true if you want to create a tentative definition.
void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
bool IsTentative) {
- llvm::Constant *Init = nullptr;
+ // OpenCL global variables of sampler type are translated to function calls,
+ // so there is no need to emit them as globals.
QualType ASTTy = D->getType();
+ if (getLangOpts().OpenCL && ASTTy->isSamplerT())
+ return;
+
+ llvm::Constant *Init = nullptr;
CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
bool NeedsGlobalCtor = false;
bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
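An example of what the early return above skips (OpenCL source shown in the comment; CLK_* are standard sampler constants):

// constant sampler_t S = CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST;
// S never becomes an LLVM global; each use of S is lowered to a
// sampler initializer call instead.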
@@ -2439,7 +2567,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
llvm::Type* InitType = Init->getType();
llvm::Constant *Entry =
- GetAddrOfGlobalVar(D, InitType, /*IsForDefinition=*/!IsTentative);
+ GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative));
// Strip off a bitcast if we got one back.
if (auto *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
@@ -2472,7 +2600,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Make a new global with the correct type, this is now guaranteed to work.
GV = cast<llvm::GlobalVariable>(
- GetAddrOfGlobalVar(D, InitType, /*IsForDefinition=*/!IsTentative));
+ GetAddrOfGlobalVar(D, InitType, ForDefinition_t(!IsTentative)));
// Replace all uses of the old global with the new global
llvm::Constant *NewPtrForOldDecl =
@@ -2565,9 +2693,16 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
else
GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
- if (Linkage == llvm::GlobalVariable::CommonLinkage)
+ if (Linkage == llvm::GlobalVariable::CommonLinkage) {
// common vars aren't constant even if declared const.
GV->setConstant(false);
+ // Tentative definitions of global variables may be initialized with
+ // non-zero null pointers. In that case they should have weak linkage:
+ // common linkage requires a zero initializer and forbids an explicit
+ // section, so it cannot carry a non-zero initial value.
+ if (!GV->getInitializer()->isNullValue())
+ GV->setLinkage(llvm::GlobalVariable::WeakAnyLinkage);
+ }
setNonAliasAttributes(D, GV);
@@ -2876,7 +3011,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD,
if (!GV || (GV->getType()->getElementType() != Ty))
GV = cast<llvm::GlobalValue>(GetAddrOfFunction(GD, Ty, /*ForVTable=*/false,
/*DontDefer=*/true,
- /*IsForDefinition=*/true));
+ ForDefinition));
// Already emitted.
if (!GV->isDeclaration())
@@ -3067,13 +3202,12 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
// Otherwise, convert the UTF8 literals into a string of shorts.
IsUTF16 = true;
- SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
- const UTF8 *FromPtr = (const UTF8 *)String.data();
- UTF16 *ToPtr = &ToBuf[0];
+ SmallVector<llvm::UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
+ const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
+ llvm::UTF16 *ToPtr = &ToBuf[0];
- (void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
- &ToPtr, ToPtr + NumBytes,
- strictConversion);
+ (void)llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
+ ToPtr + NumBytes, llvm::strictConversion);
// ConvertUTF8toUTF16 returns the length in ToPtr.
StringLength = ToPtr - &ToBuf[0];
@@ -3086,14 +3220,6 @@ GetConstantCFStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
nullptr)).first;
}
-static llvm::StringMapEntry<llvm::GlobalVariable *> &
-GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
- const StringLiteral *Literal, unsigned &StringLength) {
- StringRef String = Literal->getString();
- StringLength = String.size();
- return *Map.insert(std::make_pair(String, nullptr)).first;
-}
-
ConstantAddress
CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
unsigned StringLength = 0;
@@ -3108,7 +3234,6 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
llvm::Constant *Zeros[] = { Zero, Zero };
- llvm::Value *V;
// If we don't already have it, get __CFConstantStringClassReference.
if (!CFConstantStringClassRef) {
@@ -3117,7 +3242,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
llvm::Constant *GV =
CreateRuntimeVariable(Ty, "__CFConstantStringClassReference");
- if (getTarget().getTriple().isOSBinFormatCOFF()) {
+ if (getTriple().isOSBinFormatCOFF()) {
IdentifierInfo &II = getContext().Idents.get(GV->getName());
TranslationUnitDecl *TUDecl = getContext().getTranslationUnitDecl();
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
@@ -3138,25 +3263,22 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
}
// Decay array -> ptr
- V = llvm::ConstantExpr::getGetElementPtr(Ty, GV, Zeros);
- CFConstantStringClassRef = V;
- } else {
- V = CFConstantStringClassRef;
+ CFConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(Ty, GV, Zeros);
}
QualType CFTy = getContext().getCFConstantStringType();
auto *STy = cast<llvm::StructType>(getTypes().ConvertType(CFTy));
- llvm::Constant *Fields[4];
+ ConstantInitBuilder Builder(*this);
+ auto Fields = Builder.beginStruct(STy);
// Class pointer.
- Fields[0] = cast<llvm::ConstantExpr>(V);
+ Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));
// Flags.
- llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
- Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0)
- : llvm::ConstantInt::get(Ty, 0x07C8);
+ Fields.addInt(IntTy, isUTF16 ? 0x07d0 : 0x07C8);
// String pointer.
llvm::Constant *C = nullptr;
@@ -3185,31 +3307,30 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
// FIXME: We set the section explicitly to avoid a bug in ld64 224.1.
// Without it LLVM can merge the string with a non-unnamed_addr one during
// LTO. Doing that changes the section it ends up in, which surprises ld64.
- if (getTarget().getTriple().isOSBinFormatMachO())
+ if (getTriple().isOSBinFormatMachO())
GV->setSection(isUTF16 ? "__TEXT,__ustring"
: "__TEXT,__cstring,cstring_literals");
// String.
- Fields[2] =
+ llvm::Constant *Str =
llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
if (isUTF16)
// Cast the UTF16 string to the correct type.
- Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy);
+ Str = llvm::ConstantExpr::getBitCast(Str, Int8PtrTy);
+ Fields.add(Str);
// String length.
- Ty = getTypes().ConvertType(getContext().LongTy);
- Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+ auto Ty = getTypes().ConvertType(getContext().LongTy);
+ Fields.addInt(cast<llvm::IntegerType>(Ty), StringLength);
CharUnits Alignment = getPointerAlign();
// The struct.
- C = llvm::ConstantStruct::get(STy, Fields);
- GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
- llvm::GlobalVariable::PrivateLinkage, C,
- "_unnamed_cfstring_");
- GV->setAlignment(Alignment.getQuantity());
- switch (getTarget().getTriple().getObjectFormat()) {
+ GV = Fields.finishAndCreateGlobal("_unnamed_cfstring_", Alignment,
+ /*isConstant=*/false,
+ llvm::GlobalVariable::PrivateLinkage);
+ switch (getTriple().getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
case llvm::Triple::COFF:
@@ -3225,124 +3346,6 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
return ConstantAddress(GV, Alignment);
}
-ConstantAddress
-CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
- unsigned StringLength = 0;
- llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
- GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
-
- if (auto *C = Entry.second)
- return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
-
- llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
- llvm::Constant *Zeros[] = { Zero, Zero };
- llvm::Value *V;
- // If we don't already have it, get _NSConstantStringClassReference.
- if (!ConstantStringClassRef) {
- std::string StringClass(getLangOpts().ObjCConstantStringClass);
- llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
- llvm::Constant *GV;
- if (LangOpts.ObjCRuntime.isNonFragile()) {
- std::string str =
- StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
- : "OBJC_CLASS_$_" + StringClass;
- GV = getObjCRuntime().GetClassGlobal(str);
- // Make sure the result is of the correct type.
- llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
- V = llvm::ConstantExpr::getBitCast(GV, PTy);
- ConstantStringClassRef = V;
- } else {
- std::string str =
- StringClass.empty() ? "_NSConstantStringClassReference"
- : "_" + StringClass + "ClassReference";
- llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
- GV = CreateRuntimeVariable(PTy, str);
- // Decay array -> ptr
- V = llvm::ConstantExpr::getGetElementPtr(PTy, GV, Zeros);
- ConstantStringClassRef = V;
- }
- } else
- V = ConstantStringClassRef;
-
- if (!NSConstantStringType) {
- // Construct the type for a constant NSString.
- RecordDecl *D = Context.buildImplicitRecord("__builtin_NSString");
- D->startDefinition();
-
- QualType FieldTypes[3];
-
- // const int *isa;
- FieldTypes[0] = Context.getPointerType(Context.IntTy.withConst());
- // const char *str;
- FieldTypes[1] = Context.getPointerType(Context.CharTy.withConst());
- // unsigned int length;
- FieldTypes[2] = Context.UnsignedIntTy;
-
- // Create fields
- for (unsigned i = 0; i < 3; ++i) {
- FieldDecl *Field = FieldDecl::Create(Context, D,
- SourceLocation(),
- SourceLocation(), nullptr,
- FieldTypes[i], /*TInfo=*/nullptr,
- /*BitWidth=*/nullptr,
- /*Mutable=*/false,
- ICIS_NoInit);
- Field->setAccess(AS_public);
- D->addDecl(Field);
- }
-
- D->completeDefinition();
- QualType NSTy = Context.getTagDeclType(D);
- NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
- }
-
- llvm::Constant *Fields[3];
-
- // Class pointer.
- Fields[0] = cast<llvm::ConstantExpr>(V);
-
- // String pointer.
- llvm::Constant *C =
- llvm::ConstantDataArray::getString(VMContext, Entry.first());
-
- llvm::GlobalValue::LinkageTypes Linkage;
- bool isConstant;
- Linkage = llvm::GlobalValue::PrivateLinkage;
- isConstant = !LangOpts.WritableStrings;
-
- auto *GV = new llvm::GlobalVariable(getModule(), C->getType(), isConstant,
- Linkage, C, ".str");
- GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- // Don't enforce the target's minimum global alignment, since the only use
- // of the string is via this class initializer.
- CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
- GV->setAlignment(Align.getQuantity());
- Fields[1] =
- llvm::ConstantExpr::getGetElementPtr(GV->getValueType(), GV, Zeros);
-
- // String length.
- llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
- Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
-
- // The struct.
- CharUnits Alignment = getPointerAlign();
- C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
- GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
- llvm::GlobalVariable::PrivateLinkage, C,
- "_unnamed_nsstring_");
- GV->setAlignment(Alignment.getQuantity());
- const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
- const char *NSStringNonFragileABISection =
- "__DATA,__objc_stringobj,regular,no_dead_strip";
- // FIXME. Fix section.
- GV->setSection(LangOpts.ObjCRuntime.isNonFragile()
- ? NSStringNonFragileABISection
- : NSStringSection);
- Entry.second = GV;
-
- return ConstantAddress(GV, Alignment);
-}
-
QualType CodeGenModule::getObjCFastEnumerationStateType() {
if (ObjCFastEnumerationStateType.isNull()) {
RecordDecl *D = Context.buildImplicitRecord("__objcFastEnumerationState");
@@ -3710,17 +3713,6 @@ void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
D->setHasNonZeroConstructors(true);
}
-/// EmitNamespace - Emit all declarations in a namespace.
-void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
- for (auto *I : ND->decls()) {
- if (const auto *VD = dyn_cast<VarDecl>(I))
- if (VD->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
- VD->getTemplateSpecializationKind() != TSK_Undeclared)
- continue;
- EmitTopLevelDecl(I);
- }
-}
-
// EmitLinkageSpec - Emit all declarations in a linkage spec.
void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
@@ -3729,13 +3721,21 @@ void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
return;
}
- for (auto *I : LSD->decls()) {
- // Meta-data for ObjC class includes references to implemented methods.
- // Generate class's method definitions first.
+ EmitDeclContext(LSD);
+}
+
+void CodeGenModule::EmitDeclContext(const DeclContext *DC) {
+ for (auto *I : DC->decls()) {
+ // Unlike other DeclContexts, the contents of an ObjCImplDecl at TU scope
+ // are themselves considered "top-level", so EmitTopLevelDecl on an
+ // ObjCImplDecl does not recursively visit them. We need to do that in
+ // case they're nested inside another construct (LinkageSpecDecl /
+ // ExportDecl) that does stop them from being considered "top-level".
if (auto *OID = dyn_cast<ObjCImplDecl>(I)) {
for (auto *M : OID->methods())
EmitTopLevelDecl(M);
}
+
EmitTopLevelDecl(I);
}
}
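A minimal illustrative input for the EmitDeclContext change (assumed source, not from this patch): declarations nested in a linkage spec are not considered "top-level" on their own, so the containing context forwards each one to EmitTopLevelDecl, and namespaces now reuse the same loop:

    // Illustrative only: the members of the linkage spec and of the
    // namespace all reach EmitTopLevelDecl through EmitDeclContext.
    extern "C" {
      int exported_counter = 0;
      int bump(void) { return ++exported_counter; }
    }
    namespace detail {
      int helper() { return 42; }
    }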
@@ -3762,11 +3762,16 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
case Decl::Var:
+ case Decl::Decomposition:
// Skip variable templates
if (cast<VarDecl>(D)->getDescribedVarTemplate())
return;
case Decl::VarTemplateSpecialization:
EmitGlobal(cast<VarDecl>(D));
+ if (auto *DD = dyn_cast<DecompositionDecl>(D))
+ for (auto *B : DD->bindings())
+ if (auto *HD = B->getHoldingVar())
+ EmitGlobal(HD);
break;
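An illustrative C++1z input for the new Decl::Decomposition case (assumed, not from this patch): a namespace-scope structured binding to a tuple-like type introduces a hidden "holding variable" per binding, and those are now EmitGlobal'd along with the decomposition itself:

    #include <tuple>

    std::tuple<int, float> state{1, 2.0f};
    // Illustrative only: 'a' and 'b' are tuple-like bindings, so each has a
    // holding variable (initialized from std::get<0>(state) and
    // std::get<1>(state)) that must be emitted as a global in its own right.
    auto [a, b] = state;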
// Indirect fields from global anonymous structs and unions can be
@@ -3776,7 +3781,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// C++ Decls
case Decl::Namespace:
- EmitNamespace(cast<NamespaceDecl>(D));
+ EmitDeclContext(cast<NamespaceDecl>(D));
break;
case Decl::CXXRecord:
// Emit any static data members, they may be definitions.
@@ -3911,16 +3916,50 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
case Decl::Import: {
auto *Import = cast<ImportDecl>(D);
- // Ignore import declarations that come from imported modules.
- if (Import->getImportedOwningModule())
+ // If we've already imported this module, we're done.
+ if (!ImportedModules.insert(Import->getImportedModule()))
break;
- if (CGDebugInfo *DI = getModuleDebugInfo())
- DI->EmitImportDecl(*Import);
- ImportedModules.insert(Import->getImportedModule());
+ // Emit debug information for direct imports.
+ if (!Import->getImportedOwningModule()) {
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitImportDecl(*Import);
+ }
+
+ // Find all of the submodules and emit the module initializers.
+ llvm::SmallPtrSet<clang::Module *, 16> Visited;
+ SmallVector<clang::Module *, 16> Stack;
+ Visited.insert(Import->getImportedModule());
+ Stack.push_back(Import->getImportedModule());
+
+ while (!Stack.empty()) {
+ clang::Module *Mod = Stack.pop_back_val();
+ if (!EmittedModuleInitializers.insert(Mod).second)
+ continue;
+
+ for (auto *D : Context.getModuleInitializers(Mod))
+ EmitTopLevelDecl(D);
+
+ // Visit the submodules of this module.
+ for (clang::Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ // Skip explicit children; they need to be explicitly imported to emit
+ // the initializers.
+ if ((*Sub)->IsExplicit)
+ continue;
+
+ if (Visited.insert(*Sub).second)
+ Stack.push_back(*Sub);
+ }
+ }
break;
}
+ case Decl::Export:
+ EmitDeclContext(cast<ExportDecl>(D));
+ break;
+
case Decl::OMPThreadPrivate:
EmitOMPThreadPrivateDecl(cast<OMPThreadPrivateDecl>(D));
break;
@@ -4153,18 +4192,24 @@ void CodeGenModule::EmitTargetMetadata() {
}
void CodeGenModule::EmitCoverageFile() {
- if (!getCodeGenOpts().CoverageFile.empty()) {
- if (llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu")) {
- llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
- llvm::LLVMContext &Ctx = TheModule.getContext();
- llvm::MDString *CoverageFile =
- llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile);
- for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
- llvm::MDNode *CU = CUNode->getOperand(i);
- llvm::Metadata *Elts[] = {CoverageFile, CU};
- GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
- }
- }
+ if (getCodeGenOpts().CoverageDataFile.empty() &&
+ getCodeGenOpts().CoverageNotesFile.empty())
+ return;
+
+ llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu");
+ if (!CUNode)
+ return;
+
+ llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ auto *CoverageDataFile =
+ llvm::MDString::get(Ctx, getCodeGenOpts().CoverageDataFile);
+ auto *CoverageNotesFile =
+ llvm::MDString::get(Ctx, getCodeGenOpts().CoverageNotesFile);
+ for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
+ llvm::MDNode *CU = CUNode->getOperand(i);
+ llvm::Metadata *Elts[] = {CoverageNotesFile, CoverageDataFile, CU};
+ GCov->addOperand(llvm::MDNode::get(Ctx, Elts));
}
}
@@ -4311,3 +4356,13 @@ llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
return *SanStats;
}
+llvm::Value *
+CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
+ CodeGenFunction &CGF) {
+ llvm::Constant *C = EmitConstantExpr(E, E->getType(), &CGF);
+ auto SamplerT = getOpenCLRuntime().getSamplerType();
+ auto FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
+ return CGF.Builder.CreateCall(CreateRuntimeFunction(FTy,
+ "__translate_sampler_initializer"),
+ {C});
+}
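A sketch of the intended call site (assumed from the signature above, not part of this patch): when an OpenCL sampler_t variable is initialized from an integer constant expression such as CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST, the initializer is folded to a constant and routed through the __translate_sampler_initializer runtime function:

    // Hypothetical caller inside codegen; 'Init' is the integer initializer
    // expression of a sampler_t variable.
    llvm::Value *emitSamplerInit(clang::CodeGen::CodeGenModule &CGM,
                                 clang::CodeGen::CodeGenFunction &CGF,
                                 const clang::Expr *Init) {
      // Emits a call to @__translate_sampler_initializer on the folded
      // integer constant and returns the resulting sampler value.
      return CGM.createOpenCLIntToSamplerConversion(Init, CGF);
    }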
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 94904997d629..1d72b4edeb13 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -94,6 +94,11 @@ class FunctionArgList;
class CoverageMappingModuleGen;
class TargetCodeGenInfo;
+enum ForDefinition_t : bool {
+ NotForDefinition = false,
+ ForDefinition = true
+};
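Because ForDefinition_t is backed by bool, the new named constants document call sites without breaking existing bool plumbing. A minimal sketch (GD assumed to be a live GlobalDecl):

    // Before: CGM.GetAddrOfGlobal(GD, /*IsForDefinition=*/true);
    // After, self-documenting at the call site:
    llvm::Constant *Addr = CGM.GetAddrOfGlobal(GD, ForDefinition);
    bool IsDef = ForDefinition;  // still converts implicitly where a bool is expected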
+
struct OrderGlobalInits {
unsigned int priority;
unsigned int lex_order;
@@ -420,6 +425,10 @@ private:
/// \brief The complete set of modules that has been imported.
llvm::SetVector<clang::Module *> ImportedModules;
+ /// \brief The set of modules for which the module initializers
+ /// have been emitted.
+ llvm::SmallPtrSet<clang::Module *, 16> EmittedModuleInitializers;
+
/// \brief A vector of metadata strings.
SmallVector<llvm::Metadata *, 16> LinkerOptionsMetadata;
@@ -430,13 +439,6 @@ private:
/// int * but is actually an Obj-C class pointer.
llvm::WeakVH CFConstantStringClassRef;
- /// Cached reference to the class for constant strings. This value has type
- /// int * but is actually an Obj-C class pointer.
- llvm::WeakVH ConstantStringClassRef;
-
- /// \brief The LLVM type corresponding to NSConstantString.
- llvm::StructType *NSConstantStringType = nullptr;
-
/// \brief The type used to describe the state of a fast enumeration in
/// Objective-C's for..in loop.
QualType ObjCFastEnumerationStateType;
@@ -453,6 +455,14 @@ private:
bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(GlobalDecl GD);
+  /// Map used to make sure we don't emit the same CompoundLiteral twice.
+ llvm::DenseMap<const CompoundLiteralExpr *, llvm::GlobalVariable *>
+ EmittedCompoundLiterals;
+
+ /// Map of the global blocks we've emitted, so that we don't have to re-emit
+ /// them if the constexpr evaluator gets aggressive.
+ llvm::DenseMap<const BlockExpr *, llvm::Constant *> EmittedGlobalBlocks;
+
/// @name Cache for Blocks Runtime Globals
/// @{
@@ -610,7 +620,7 @@ public:
return TheModule.getDataLayout();
}
const TargetInfo &getTarget() const { return Target; }
- const llvm::Triple &getTriple() const;
+ const llvm::Triple &getTriple() const { return Target.getTriple(); }
bool supportsCOMDAT() const;
void maybeSetTrivialComdat(const Decl &D, llvm::GlobalObject &GO);
@@ -679,7 +689,9 @@ public:
llvm_unreachable("unknown visibility!");
}
- llvm::Constant *GetAddrOfGlobal(GlobalDecl GD, bool IsForDefinition = false);
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD,
+ ForDefinition_t IsForDefinition
+ = NotForDefinition);
/// Will return a global variable of the given type. If a variable with a
/// different type already exists then a new variable with the right type
@@ -709,14 +721,16 @@ public:
/// the same mangled name but some other type.
llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
llvm::Type *Ty = nullptr,
- bool IsForDefinition = false);
+ ForDefinition_t IsForDefinition
+ = NotForDefinition);
/// Return the address of the given function. If Ty is non-null, then this
/// function will use the specified type if it has to create it.
llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = nullptr,
bool ForVTable = false,
bool DontDefer = false,
- bool IsForDefinition = false);
+ ForDefinition_t IsForDefinition
+ = NotForDefinition);
/// Get the address of the RTTI descriptor for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
@@ -769,7 +783,17 @@ public:
llvm::Type *getGenericBlockLiteralType();
/// Gets the address of a block which requires no captures.
- llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, StringRef Name);
+
+  /// Returns the address of a block which requires no captures, or null if
+ /// we've yet to emit the block for BE.
+ llvm::Constant *getAddrOfGlobalBlockIfEmitted(const BlockExpr *BE) {
+ return EmittedGlobalBlocks.lookup(BE);
+ }
+
+ /// Notes that BE's global block is available via Addr. Asserts that BE
+ /// isn't already emitted.
+ void setAddrOfGlobalBlock(const BlockExpr *BE, llvm::Constant *Addr);
/// Return a pointer to a constant CFString object for the given string.
ConstantAddress GetAddrOfConstantCFString(const StringLiteral *Literal);
@@ -804,6 +828,16 @@ public:
/// compound literal expression.
ConstantAddress GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
+ /// If it's been emitted already, returns the GlobalVariable corresponding to
+ /// a compound literal. Otherwise, returns null.
+ llvm::GlobalVariable *
+ getAddrOfConstantCompoundLiteralIfEmitted(const CompoundLiteralExpr *E);
+
+ /// Notes that CLE's GlobalVariable is GV. Asserts that CLE isn't already
+ /// emitted.
+ void setAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *CLE,
+ llvm::GlobalVariable *GV);
+
/// \brief Returns a pointer to a global variable representing a temporary
/// with static or thread storage duration.
ConstantAddress GetAddrOfGlobalTemporary(const MaterializeTemporaryExpr *E,
@@ -824,12 +858,13 @@ public:
getAddrOfCXXStructor(const CXXMethodDecl *MD, StructorType Type,
const CGFunctionInfo *FnInfo = nullptr,
llvm::FunctionType *FnType = nullptr,
- bool DontDefer = false, bool IsForDefinition = false);
+ bool DontDefer = false,
+ ForDefinition_t IsForDefinition = NotForDefinition);
/// Given a builtin id for a function like "__builtin_fabsf", return a
/// Function* for "fabsf".
- llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
- unsigned BuiltinID);
+ llvm::Constant *getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID);
llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys = None);
@@ -869,10 +904,11 @@ public:
}
/// Create a new runtime function with the specified type and name.
- llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
- StringRef Name,
- llvm::AttributeSet ExtraAttrs =
- llvm::AttributeSet());
+ llvm::Constant *
+ CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name,
+ llvm::AttributeSet ExtraAttrs = llvm::AttributeSet(),
+ bool Local = false);
+
/// Create a new compiler builtin function with the specified type and name.
llvm::Constant *CreateBuiltinFunction(llvm::FunctionType *Ty,
StringRef Name,
@@ -1145,18 +1181,27 @@ public:
llvm::SanitizerStatReport &getSanStats();
+ llvm::Value *
+ createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF);
+
+ /// Get target specific null pointer.
+ /// \param T is the LLVM type of the null pointer.
+ /// \param QT is the clang QualType of the null pointer.
+ llvm::Constant *getNullPointer(llvm::PointerType *T, QualType QT);
+
private:
llvm::Constant *
GetOrCreateLLVMFunction(StringRef MangledName, llvm::Type *Ty, GlobalDecl D,
bool ForVTable, bool DontDefer = false,
bool IsThunk = false,
llvm::AttributeSet ExtraAttrs = llvm::AttributeSet(),
- bool IsForDefinition = false);
+ ForDefinition_t IsForDefinition = NotForDefinition);
llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
llvm::PointerType *PTy,
const VarDecl *D,
- bool IsForDefinition = false);
+ ForDefinition_t IsForDefinition
+ = NotForDefinition);
void setNonAliasAttributes(const Decl *D, llvm::GlobalObject *GO);
@@ -1175,7 +1220,7 @@ private:
// C++ related functions.
- void EmitNamespace(const NamespaceDecl *D);
+ void EmitDeclContext(const DeclContext *DC);
void EmitLinkageSpec(const LinkageSpecDecl *D);
void CompleteDIClassType(const CXXMethodDecl* D);
@@ -1202,10 +1247,10 @@ private:
llvm::Constant *AssociatedData = nullptr);
void AddGlobalDtor(llvm::Function *Dtor, int Priority = 65535);
- /// Generates a global array of functions and priorities using the given list
- /// and name. This array will have appending linkage and is suitable for use
- /// as a LLVM constructor or destructor array.
- void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+ /// EmitCtorList - Generates a global array of functions and priorities using
+ /// the given list and name. This array will have appending linkage and is
+  /// suitable for use as an LLVM constructor or destructor array. Clears Fns.
+ void EmitCtorList(CtorList &Fns, const char *GlobalName);
/// Emit any needed decls for which code generation was deferred.
void EmitDeferred();
diff --git a/lib/CodeGen/CodeGenPGO.cpp b/lib/CodeGen/CodeGenPGO.cpp
index 4eefdd72b7e4..c6c3fa41e628 100644
--- a/lib/CodeGen/CodeGenPGO.cpp
+++ b/lib/CodeGen/CodeGenPGO.cpp
@@ -458,6 +458,8 @@ struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
void VisitSwitchStmt(const SwitchStmt *S) {
RecordStmtCount(S);
+ if (S->getInit())
+ Visit(S->getInit());
Visit(S->getCond());
CurrentCount = 0;
BreakContinueStack.push_back(BreakContinue());
@@ -488,6 +490,8 @@ struct ComputeRegionCounts : public ConstStmtVisitor<ComputeRegionCounts> {
void VisitIfStmt(const IfStmt *S) {
RecordStmtCount(S);
uint64_t ParentCount = CurrentCount;
+ if (S->getInit())
+ Visit(S->getInit());
Visit(S->getCond());
// Counter tracks the "then" part of an if statement. The count for
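Both this region-count visitor and the coverage-mapping visitor changed further below now walk the C++1z init-statement before the condition, so counters and coverage regions attached to it are no longer dropped. An illustrative input (assumed):

    #include <map>

    int lookup(std::map<int, int> &m, int key) {
      // The init-statements below are now visited by VisitIfStmt and
      // VisitSwitchStmt in both CodeGenPGO and CoverageMappingGen.
      if (auto it = m.find(key); it != m.end())
        return it->second;
      switch (int v = key % 3; v) {
      case 0:  return 0;
      default: return v;
      }
    }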
diff --git a/lib/CodeGen/CodeGenPGO.h b/lib/CodeGen/CodeGenPGO.h
index d03f23535bb9..4f229cde63b0 100644
--- a/lib/CodeGen/CodeGenPGO.h
+++ b/lib/CodeGen/CodeGenPGO.h
@@ -18,9 +18,7 @@
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
#include "clang/Frontend/CodeGenOptions.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ProfileData/InstrProfReader.h"
-#include "llvm/Support/MemoryBuffer.h"
#include <array>
#include <memory>
diff --git a/lib/CodeGen/CodeGenTypeCache.h b/lib/CodeGen/CodeGenTypeCache.h
index c32b66d129da..47e26bcaa1b6 100644
--- a/lib/CodeGen/CodeGenTypeCache.h
+++ b/lib/CodeGen/CodeGenTypeCache.h
@@ -80,9 +80,14 @@ struct CodeGenTypeCache {
union {
unsigned char PointerAlignInBytes;
unsigned char PointerSizeInBytes;
+ };
+
+ /// The size and alignment of size_t.
+ union {
unsigned char SizeSizeInBytes; // sizeof(size_t)
unsigned char SizeAlignInBytes;
};
+
CharUnits getSizeSize() const {
return CharUnits::fromQuantity(SizeSizeInBytes);
}
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index ebe55c70d817..b95b4fff5734 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -286,21 +286,21 @@ void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
const llvm::fltSemantics &format,
bool UseNativeHalf = false) {
- if (&format == &llvm::APFloat::IEEEhalf) {
+ if (&format == &llvm::APFloat::IEEEhalf()) {
if (UseNativeHalf)
return llvm::Type::getHalfTy(VMContext);
else
return llvm::Type::getInt16Ty(VMContext);
}
- if (&format == &llvm::APFloat::IEEEsingle)
+ if (&format == &llvm::APFloat::IEEEsingle())
return llvm::Type::getFloatTy(VMContext);
- if (&format == &llvm::APFloat::IEEEdouble)
+ if (&format == &llvm::APFloat::IEEEdouble())
return llvm::Type::getDoubleTy(VMContext);
- if (&format == &llvm::APFloat::IEEEquad)
+ if (&format == &llvm::APFloat::IEEEquad())
return llvm::Type::getFP128Ty(VMContext);
- if (&format == &llvm::APFloat::PPCDoubleDouble)
+ if (&format == &llvm::APFloat::PPCDoubleDouble())
return llvm::Type::getPPC_FP128Ty(VMContext);
- if (&format == &llvm::APFloat::x87DoubleExtended)
+ if (&format == &llvm::APFloat::x87DoubleExtended())
return llvm::Type::getX86_FP80Ty(VMContext);
llvm_unreachable("Unknown float format!");
}
@@ -736,10 +736,14 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
return *Layout;
}
+bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
+  assert(T->isAnyPointerType() && "Invalid type");
+ return isZeroInitializable(T);
+}
+
bool CodeGenTypes::isZeroInitializable(QualType T) {
- // No need to check for member pointers when not compiling C++.
- if (!Context.getLangOpts().CPlusPlus)
- return true;
+ if (T->getAs<PointerType>())
+ return Context.getTargetNullPointerValue(T) == 0;
if (const auto *AT = Context.getAsArrayType(T)) {
if (isa<IncompleteArrayType>(AT))
@@ -753,7 +757,7 @@ bool CodeGenTypes::isZeroInitializable(QualType T) {
// Records are non-zero-initializable if they contain any
// non-zero-initializable subobjects.
if (const RecordType *RT = T->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ auto RD = cast<RecordDecl>(RT->getDecl());
return isZeroInitializable(RD);
}
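The rewritten pointer check drops the old "always zero-initializable outside C++" shortcut: a target may define a non-zero null pointer value for some address space, in which case a pointer-typed object cannot be lowered with a plain zeroinitializer. Illustrative only, under assumed target semantics:

    // Hypothetical target where null in address space 3 is all-ones bits:
    // getTargetNullPointerValue(T) != 0, so this global must get an explicit
    // initializer instead of an LLVM zeroinitializer.
    __attribute__((address_space(3))) int *p = nullptr;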
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 5796ab8fe5aa..2ce6591e4eb7 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -15,15 +15,14 @@
#define LLVM_CLANG_LIB_CODEGEN_CODEGENTYPES_H
#include "CGCall.h"
-#include "clang/AST/GlobalDecl.h"
+#include "clang/Basic/ABI.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "clang/Sema/Sema.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Module.h"
-#include <vector>
namespace llvm {
class FunctionType;
-class Module;
class DataLayout;
class Type;
class LLVMContext;
@@ -48,6 +47,7 @@ class TagDecl;
class TargetInfo;
class Type;
typedef CanQual<Type> CanQualType;
+class GlobalDecl;
namespace CodeGen {
class ABIInfo;
@@ -352,6 +352,10 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(QualType T);
+ /// Check if the pointer type can be zero-initialized (in the C++ sense)
+ /// with an LLVM zeroinitializer.
+ bool isPointerZeroInitializable(QualType T);
+
/// IsZeroInitializable - Return whether a record type can be
/// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
bool isZeroInitializable(const RecordDecl *RD);
diff --git a/lib/CodeGen/ConstantBuilder.h b/lib/CodeGen/ConstantBuilder.h
new file mode 100644
index 000000000000..40b34a9d61c8
--- /dev/null
+++ b/lib/CodeGen/ConstantBuilder.h
@@ -0,0 +1,444 @@
+//===----- ConstantBuilder.h - Builder for LLVM IR constants ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class provides a convenient interface for building complex
+// global initializers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CONSTANTBUILDER_H
+#define LLVM_CLANG_LIB_CODEGEN_CONSTANTBUILDER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Constants.h"
+
+#include "CodeGenModule.h"
+
+#include <vector>
+
+namespace clang {
+namespace CodeGen {
+
+class ConstantStructBuilder;
+class ConstantArrayBuilder;
+
+/// A convenience builder class for complex constant initializers,
+/// especially for anonymous global structures used by various language
+/// runtimes.
+///
+/// The basic usage pattern is expected to be something like:
+/// ConstantInitBuilder builder(CGM);
+/// auto toplevel = builder.beginStruct();
+/// toplevel.addInt(CGM.SizeTy, widgets.size());
+/// auto widgetArray = builder.beginArray();
+/// for (auto &widget : widgets) {
+/// auto widgetDesc = widgetArray.beginStruct();
+/// widgetDesc.addInt(CGM.SizeTy, widget.getPower());
+/// widgetDesc.add(CGM.GetAddrOfConstantString(widget.getName()));
+/// widgetDesc.add(CGM.GetAddrOfGlobal(widget.getInitializerDecl()));
+/// widgetDesc.finishAndAddTo(widgetArray);
+/// }
+/// widgetArray.finishAndAddTo(toplevel);
+/// auto global = toplevel.finishAndCreateGlobal("WIDGET_LIST", Align,
+/// /*constant*/ true);
+class ConstantInitBuilder {
+ struct SelfReference {
+ llvm::GlobalVariable *Dummy;
+ llvm::SmallVector<llvm::Constant*, 4> Indices;
+
+ SelfReference(llvm::GlobalVariable *dummy) : Dummy(dummy) {}
+ };
+ CodeGenModule &CGM;
+ llvm::SmallVector<llvm::Constant*, 16> Buffer;
+ std::vector<SelfReference> SelfReferences;
+ bool Frozen = false;
+
+public:
+ explicit ConstantInitBuilder(CodeGenModule &CGM) : CGM(CGM) {}
+
+ ~ConstantInitBuilder() {
+ assert(Buffer.empty() && "didn't claim all values out of buffer");
+ }
+
+ class AggregateBuilderBase {
+ protected:
+ ConstantInitBuilder &Builder;
+ AggregateBuilderBase *Parent;
+ size_t Begin;
+ bool Finished = false;
+ bool Frozen = false;
+
+ llvm::SmallVectorImpl<llvm::Constant*> &getBuffer() {
+ return Builder.Buffer;
+ }
+
+ const llvm::SmallVectorImpl<llvm::Constant*> &getBuffer() const {
+ return Builder.Buffer;
+ }
+
+ AggregateBuilderBase(ConstantInitBuilder &builder,
+ AggregateBuilderBase *parent)
+ : Builder(builder), Parent(parent), Begin(builder.Buffer.size()) {
+ if (parent) {
+ assert(!parent->Frozen && "parent already has child builder active");
+ parent->Frozen = true;
+ } else {
+ assert(!builder.Frozen && "builder already has child builder active");
+ builder.Frozen = true;
+ }
+ }
+
+ ~AggregateBuilderBase() {
+ assert(Finished && "didn't finish aggregate builder");
+ }
+
+ void markFinished() {
+ assert(!Frozen && "child builder still active");
+ assert(!Finished && "builder already finished");
+ Finished = true;
+ if (Parent) {
+ assert(Parent->Frozen &&
+ "parent not frozen while child builder active");
+ Parent->Frozen = false;
+ } else {
+ assert(Builder.Frozen &&
+ "builder not frozen while child builder active");
+ Builder.Frozen = false;
+ }
+ }
+
+ public:
+ // Not copyable.
+ AggregateBuilderBase(const AggregateBuilderBase &) = delete;
+ AggregateBuilderBase &operator=(const AggregateBuilderBase &) = delete;
+
+ // Movable, mostly to allow returning. But we have to write this out
+ // properly to satisfy the assert in the destructor.
+ AggregateBuilderBase(AggregateBuilderBase &&other)
+ : Builder(other.Builder), Parent(other.Parent), Begin(other.Begin),
+ Finished(other.Finished), Frozen(other.Frozen) {
+ other.Finished = false;
+ }
+ AggregateBuilderBase &operator=(AggregateBuilderBase &&other) = delete;
+
+ /// Abandon this builder completely.
+ void abandon() {
+ markFinished();
+ auto &buffer = Builder.Buffer;
+ buffer.erase(buffer.begin() + Begin, buffer.end());
+ }
+
+ /// Add a new value to this initializer.
+ void add(llvm::Constant *value) {
+ assert(value && "adding null value to constant initializer");
+ assert(!Finished && "cannot add more values after finishing builder");
+ assert(!Frozen && "cannot add values while subbuilder is active");
+ Builder.Buffer.push_back(value);
+ }
+
+ /// Add an integer value of type size_t.
+ void addSize(CharUnits size) {
+ add(Builder.CGM.getSize(size));
+ }
+
+ /// Add an integer value of a specific type.
+ void addInt(llvm::IntegerType *intTy, uint64_t value,
+ bool isSigned = false) {
+ add(llvm::ConstantInt::get(intTy, value, isSigned));
+ }
+
+ /// Add a null pointer of a specific type.
+ void addNullPointer(llvm::PointerType *ptrTy) {
+ add(llvm::ConstantPointerNull::get(ptrTy));
+ }
+
+ /// Add a bitcast of a value to a specific type.
+ void addBitCast(llvm::Constant *value, llvm::Type *type) {
+ add(llvm::ConstantExpr::getBitCast(value, type));
+ }
+
+ /// Add a bunch of new values to this initializer.
+ void addAll(ArrayRef<llvm::Constant *> values) {
+ assert(!Finished && "cannot add more values after finishing builder");
+ assert(!Frozen && "cannot add values while subbuilder is active");
+ Builder.Buffer.append(values.begin(), values.end());
+ }
+
+ /// An opaque class to hold the abstract position of a placeholder.
+ class PlaceholderPosition {
+ size_t Index;
+ friend class AggregateBuilderBase;
+ PlaceholderPosition(size_t index) : Index(index) {}
+ };
+
+ /// Add a placeholder value to the structure. The returned position
+ /// can be used to set the value later; it will not be invalidated by
+ /// any intermediate operations except (1) filling the same position or
+ /// (2) finishing the entire builder.
+ ///
+ /// This is useful for emitting certain kinds of structure which
+  /// contain some sort of summary field, generally a count, before any
+ /// of the data. By emitting a placeholder first, the structure can
+ /// be emitted eagerly.
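+  ///
+  /// Illustrative use (names assumed):
+  ///   auto countPos = fields.addPlaceholder();
+  ///   for (auto *elt : elts) fields.add(elt);
+  ///   fields.fillPlaceholderWithInt(countPos, CGM.SizeTy, elts.size());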
+ PlaceholderPosition addPlaceholder() {
+ assert(!Finished && "cannot add more values after finishing builder");
+ assert(!Frozen && "cannot add values while subbuilder is active");
+ Builder.Buffer.push_back(nullptr);
+ return Builder.Buffer.size() - 1;
+ }
+
+ /// Fill a previously-added placeholder.
+ void fillPlaceholderWithInt(PlaceholderPosition position,
+ llvm::IntegerType *type, uint64_t value,
+ bool isSigned = false) {
+ fillPlaceholder(position, llvm::ConstantInt::get(type, value, isSigned));
+ }
+
+ /// Fill a previously-added placeholder.
+ void fillPlaceholder(PlaceholderPosition position, llvm::Constant *value) {
+ assert(!Finished && "cannot change values after finishing builder");
+ assert(!Frozen && "cannot add values while subbuilder is active");
+ llvm::Constant *&slot = Builder.Buffer[position.Index];
+ assert(slot == nullptr && "placeholder already filled");
+ slot = value;
+ }
+
+  /// Produce an address which will eventually point to the next
+ /// position to be filled. This is computed with an indexed
+ /// getelementptr rather than by computing offsets.
+ ///
+  /// The returned pointer will have type T*, where T is the given type.
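+  ///
+  /// Under the hood this hands out a private dummy global; once the full
+  /// initializer is installed, resolveSelfReferences() replaces each dummy
+  /// with an inbounds GEP into the real global and deletes the dummy.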
+ llvm::Constant *getAddrOfCurrentPosition(llvm::Type *type) {
+ // Make a global variable. We will replace this with a GEP to this
+ // position after installing the initializer.
+ auto dummy =
+ new llvm::GlobalVariable(Builder.CGM.getModule(), type, true,
+ llvm::GlobalVariable::PrivateLinkage,
+ nullptr, "");
+ Builder.SelfReferences.emplace_back(dummy);
+ auto &entry = Builder.SelfReferences.back();
+ (void) getGEPIndicesToCurrentPosition(entry.Indices);
+ return dummy;
+ }
+
+ ArrayRef<llvm::Constant*> getGEPIndicesToCurrentPosition(
+ llvm::SmallVectorImpl<llvm::Constant*> &indices) {
+ getGEPIndicesTo(indices, Builder.Buffer.size());
+ return indices;
+ }
+
+ ConstantArrayBuilder beginArray(llvm::Type *eltTy = nullptr);
+ ConstantStructBuilder beginStruct(llvm::StructType *structTy = nullptr);
+
+ private:
+ void getGEPIndicesTo(llvm::SmallVectorImpl<llvm::Constant*> &indices,
+ size_t position) const {
+ // Recurse on the parent builder if present.
+ if (Parent) {
+ Parent->getGEPIndicesTo(indices, Begin);
+
+ // Otherwise, add an index to drill into the first level of pointer.
+ } else {
+ assert(indices.empty());
+ indices.push_back(llvm::ConstantInt::get(Builder.CGM.Int32Ty, 0));
+ }
+
+ assert(position >= Begin);
+ // We have to use i32 here because struct GEPs demand i32 indices.
+ // It's rather unlikely to matter in practice.
+ indices.push_back(llvm::ConstantInt::get(Builder.CGM.Int32Ty,
+ position - Begin));
+ }
+ };
+
+ template <class Impl>
+ class AggregateBuilder : public AggregateBuilderBase {
+ protected:
+ AggregateBuilder(ConstantInitBuilder &builder,
+ AggregateBuilderBase *parent)
+ : AggregateBuilderBase(builder, parent) {}
+
+ Impl &asImpl() { return *static_cast<Impl*>(this); }
+
+ public:
+ /// Given that this builder was created by beginning an array or struct
+ /// component on the given parent builder, finish the array/struct
+ /// component and add it to the parent.
+ ///
+ /// It is an intentional choice that the parent is passed in explicitly
+ /// despite it being redundant with information already kept in the
+ /// builder. This aids in readability by making it easier to find the
+ /// places that add components to a builder, as well as "bookending"
+ /// the sub-builder more explicitly.
+ void finishAndAddTo(AggregateBuilderBase &parent) {
+ assert(Parent == &parent && "adding to non-parent builder");
+ parent.add(asImpl().finishImpl());
+ }
+
+ /// Given that this builder was created by beginning an array or struct
+ /// directly on a ConstantInitBuilder, finish the array/struct and
+ /// create a global variable with it as the initializer.
+ template <class... As>
+ llvm::GlobalVariable *finishAndCreateGlobal(As &&...args) {
+ assert(!Parent && "finishing non-root builder");
+ return Builder.createGlobal(asImpl().finishImpl(),
+ std::forward<As>(args)...);
+ }
+
+ /// Given that this builder was created by beginning an array or struct
+ /// directly on a ConstantInitBuilder, finish the array/struct and
+ /// set it as the initializer of the given global variable.
+ void finishAndSetAsInitializer(llvm::GlobalVariable *global) {
+ assert(!Parent && "finishing non-root builder");
+ return Builder.setGlobalInitializer(global, asImpl().finishImpl());
+ }
+ };
+
+ ConstantArrayBuilder beginArray(llvm::Type *eltTy = nullptr);
+
+ ConstantStructBuilder beginStruct(llvm::StructType *structTy = nullptr);
+
+private:
+ llvm::GlobalVariable *createGlobal(llvm::Constant *initializer,
+ const llvm::Twine &name,
+ CharUnits alignment,
+ bool constant = false,
+ llvm::GlobalValue::LinkageTypes linkage
+ = llvm::GlobalValue::InternalLinkage,
+ unsigned addressSpace = 0) {
+ auto GV = new llvm::GlobalVariable(CGM.getModule(),
+ initializer->getType(),
+ constant,
+ linkage,
+ initializer,
+ name,
+ /*insert before*/ nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ addressSpace);
+ GV->setAlignment(alignment.getQuantity());
+ resolveSelfReferences(GV);
+ return GV;
+ }
+
+ void setGlobalInitializer(llvm::GlobalVariable *GV,
+ llvm::Constant *initializer) {
+ GV->setInitializer(initializer);
+ resolveSelfReferences(GV);
+ }
+
+ void resolveSelfReferences(llvm::GlobalVariable *GV) {
+ for (auto &entry : SelfReferences) {
+ llvm::Constant *resolvedReference =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(
+ GV->getValueType(), GV, entry.Indices);
+ entry.Dummy->replaceAllUsesWith(resolvedReference);
+ entry.Dummy->eraseFromParent();
+ }
+ }
+};
+
+/// A helper class of ConstantInitBuilder, used for building constant
+/// array initializers.
+class ConstantArrayBuilder
+ : public ConstantInitBuilder::AggregateBuilder<ConstantArrayBuilder> {
+ llvm::Type *EltTy;
+ friend class ConstantInitBuilder;
+ template <class Impl> friend class ConstantInitBuilder::AggregateBuilder;
+ ConstantArrayBuilder(ConstantInitBuilder &builder,
+ AggregateBuilderBase *parent, llvm::Type *eltTy)
+ : AggregateBuilder(builder, parent), EltTy(eltTy) {}
+public:
+ size_t size() const {
+ assert(!Finished);
+ assert(!Frozen);
+ assert(Begin <= getBuffer().size());
+ return getBuffer().size() - Begin;
+ }
+
+ bool empty() const {
+ return size() == 0;
+ }
+
+private:
+ /// Form an array constant from the values that have been added to this
+ /// builder.
+ llvm::Constant *finishImpl() {
+ markFinished();
+
+ auto &buffer = getBuffer();
+ assert((Begin < buffer.size() ||
+ (Begin == buffer.size() && EltTy))
+ && "didn't add any array elements without element type");
+ auto elts = llvm::makeArrayRef(buffer).slice(Begin);
+ auto eltTy = EltTy ? EltTy : elts[0]->getType();
+ auto type = llvm::ArrayType::get(eltTy, elts.size());
+ auto constant = llvm::ConstantArray::get(type, elts);
+ buffer.erase(buffer.begin() + Begin, buffer.end());
+ return constant;
+ }
+};
+
+inline ConstantArrayBuilder
+ConstantInitBuilder::beginArray(llvm::Type *eltTy) {
+ return ConstantArrayBuilder(*this, nullptr, eltTy);
+}
+
+inline ConstantArrayBuilder
+ConstantInitBuilder::AggregateBuilderBase::beginArray(llvm::Type *eltTy) {
+ return ConstantArrayBuilder(Builder, this, eltTy);
+}
+
+/// A helper class of ConstantInitBuilder, used for building constant
+/// struct initializers.
+class ConstantStructBuilder
+ : public ConstantInitBuilder::AggregateBuilder<ConstantStructBuilder> {
+ llvm::StructType *Ty;
+ friend class ConstantInitBuilder;
+ template <class Impl> friend class ConstantInitBuilder::AggregateBuilder;
+ ConstantStructBuilder(ConstantInitBuilder &builder,
+ AggregateBuilderBase *parent, llvm::StructType *ty)
+ : AggregateBuilder(builder, parent), Ty(ty) {}
+
+ /// Finish the struct.
+ llvm::Constant *finishImpl() {
+ markFinished();
+
+ auto &buffer = getBuffer();
+ assert(Begin < buffer.size() && "didn't add any struct elements?");
+ auto elts = llvm::makeArrayRef(buffer).slice(Begin);
+
+ llvm::Constant *constant;
+ if (Ty) {
+ constant = llvm::ConstantStruct::get(Ty, elts);
+ } else {
+ constant = llvm::ConstantStruct::getAnon(elts, /*packed*/ false);
+ }
+
+ buffer.erase(buffer.begin() + Begin, buffer.end());
+ return constant;
+ }
+};
+
+inline ConstantStructBuilder
+ConstantInitBuilder::beginStruct(llvm::StructType *structTy) {
+ return ConstantStructBuilder(*this, nullptr, structTy);
+}
+
+inline ConstantStructBuilder
+ConstantInitBuilder::AggregateBuilderBase::beginStruct(
+ llvm::StructType *structTy) {
+ return ConstantStructBuilder(Builder, this, structTy);
+}
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
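To make the new interface concrete, here is a self-contained sketch (a hypothetical helper, not part of this patch) that emits a { i32 count, [N x i8*] }-style global the way the CFString and vtable emission above now do, including a nested array sub-builder:

    #include "ConstantBuilder.h"

    // Hypothetical helper: emit a constant descriptor table with a leading
    // element count followed by an array of opaque pointers.
    static llvm::GlobalVariable *
    emitDescriptorTable(clang::CodeGen::CodeGenModule &CGM,
                        llvm::ArrayRef<llvm::Constant *> Pointers) {
      clang::CodeGen::ConstantInitBuilder Builder(CGM);
      auto Struct = Builder.beginStruct();
      Struct.addInt(CGM.Int32Ty, Pointers.size());      // summary field first
      auto Array = Struct.beginArray(CGM.Int8PtrTy);    // nested sub-builder
      for (auto *P : Pointers)
        Array.addBitCast(P, CGM.Int8PtrTy);
      Array.finishAndAddTo(Struct);                     // "bookend" the child
      return Struct.finishAndCreateGlobal("_descriptor_table",
                                          CGM.getPointerAlign(),
                                          /*constant=*/true);
    }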
diff --git a/lib/CodeGen/CoverageMappingGen.cpp b/lib/CodeGen/CoverageMappingGen.cpp
index b011a0f319e3..5bc9e5011aa8 100644
--- a/lib/CodeGen/CoverageMappingGen.cpp
+++ b/lib/CodeGen/CoverageMappingGen.cpp
@@ -23,6 +23,7 @@
#include "llvm/ProfileData/Coverage/CoverageMappingWriter.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
using namespace clang;
using namespace CodeGen;
@@ -91,6 +92,14 @@ public:
/// \brief The source mapping regions for this function.
std::vector<SourceMappingRegion> SourceRegions;
+ /// \brief A set of regions which can be used as a filter.
+ ///
+ /// It is produced by emitExpansionRegions() and is used in
+ /// emitSourceRegions() to suppress producing code regions if
+ /// the same area is covered by expansion regions.
+ typedef llvm::SmallSet<std::pair<SourceLocation, SourceLocation>, 8>
+ SourceRegionFilter;
+
CoverageMappingBuilder(CoverageMappingModuleGen &CVM, SourceManager &SM,
const LangOptions &LangOpts)
: CVM(CVM), SM(SM), LangOpts(LangOpts) {}
@@ -127,7 +136,7 @@ public:
/// \brief Return true if \c Loc is a location in a built-in macro.
bool isInBuiltin(SourceLocation Loc) {
- return strcmp(SM.getBufferName(SM.getSpellingLoc(Loc)), "<built-in>") == 0;
+ return SM.getBufferName(SM.getSpellingLoc(Loc)) == "<built-in>";
}
/// \brief Check whether \c Loc is included or expanded from \c Parent.
@@ -248,7 +257,7 @@ public:
/// \brief Generate the coverage counter mapping regions from collected
/// source regions.
- void emitSourceRegions() {
+ void emitSourceRegions(const SourceRegionFilter &Filter) {
for (const auto &Region : SourceRegions) {
assert(Region.hasEndLoc() && "incomplete region");
@@ -268,6 +277,13 @@ public:
assert(SM.isWrittenInSameFile(LocStart, LocEnd) &&
"region spans multiple files");
+ // Don't add code regions for the area covered by expansion regions.
+ // This not only suppresses redundant regions, but sometimes prevents
+ // creating regions with wrong counters if, for example, a statement's
+ // body ends at the end of a nested macro.
+ if (Filter.count(std::make_pair(LocStart, LocEnd)))
+ continue;
+
      // Find the spelling locations for the mapping region.
unsigned LineStart = SM.getSpellingLineNumber(LocStart);
unsigned ColumnStart = SM.getSpellingColumnNumber(LocStart);
@@ -282,7 +298,8 @@ public:
}
/// \brief Generate expansion regions for each virtual file we've seen.
- void emitExpansionRegions() {
+ SourceRegionFilter emitExpansionRegions() {
+ SourceRegionFilter Filter;
for (const auto &FM : FileIDMapping) {
SourceLocation ExpandedLoc = FM.second.second;
SourceLocation ParentLoc = getIncludeOrExpansionLoc(ExpandedLoc);
@@ -298,6 +315,7 @@ public:
SourceLocation LocEnd = getPreciseTokenLocEnd(ParentLoc);
assert(SM.isWrittenInSameFile(ParentLoc, LocEnd) &&
"region spans multiple files");
+ Filter.insert(std::make_pair(ParentLoc, LocEnd));
unsigned LineStart = SM.getSpellingLineNumber(ParentLoc);
unsigned ColumnStart = SM.getSpellingColumnNumber(ParentLoc);
@@ -308,6 +326,7 @@ public:
*ParentFileID, *ExpandedFileID, LineStart, ColumnStart, LineEnd,
ColumnEnd));
}
+ return Filter;
}
};
@@ -349,7 +368,7 @@ struct EmptyCoverageMappingBuilder : public CoverageMappingBuilder {
void write(llvm::raw_ostream &OS) {
SmallVector<unsigned, 16> FileIDMapping;
gatherFileIDs(FileIDMapping);
- emitSourceRegions();
+ emitSourceRegions(SourceRegionFilter());
if (MappingRegions.empty())
return;
@@ -431,7 +450,8 @@ struct CounterCoverageMappingBuilder
SourceLocation NestedLoc = getStartOfFileOrMacro(EndLoc);
assert(SM.isWrittenInSameFile(NestedLoc, EndLoc));
- SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
+ if (!isRegionAlreadyAdded(NestedLoc, EndLoc))
+ SourceRegions.emplace_back(Region.getCounter(), NestedLoc, EndLoc);
EndLoc = getPreciseTokenLocEnd(getIncludeOrExpansionLoc(EndLoc));
if (EndLoc.isInvalid())
@@ -603,8 +623,8 @@ struct CounterCoverageMappingBuilder
void write(llvm::raw_ostream &OS) {
llvm::SmallVector<unsigned, 8> VirtualFileMapping;
gatherFileIDs(VirtualFileMapping);
- emitSourceRegions();
- emitExpansionRegions();
+ SourceRegionFilter Filter = emitExpansionRegions();
+ emitSourceRegions(Filter);
gatherSkippedRegions();
if (MappingRegions.empty())
@@ -793,6 +813,8 @@ struct CounterCoverageMappingBuilder
void VisitSwitchStmt(const SwitchStmt *S) {
extendRegion(S);
+ if (S->getInit())
+ Visit(S->getInit());
Visit(S->getCond());
BreakContinueStack.push_back(BreakContinue());
@@ -822,7 +844,11 @@ struct CounterCoverageMappingBuilder
Counter ExitCount = getRegionCounter(S);
SourceLocation ExitLoc = getEnd(S);
- pushRegion(ExitCount, getStart(S), ExitLoc);
+ pushRegion(ExitCount);
+
+ // Ensure that handleFileExit recognizes when the end location is located
+ // in a different file.
+ MostRecentLocation = getStart(S);
handleFileExit(ExitLoc);
}
@@ -849,6 +875,9 @@ struct CounterCoverageMappingBuilder
void VisitIfStmt(const IfStmt *S) {
extendRegion(S);
+ if (S->getInit())
+ Visit(S->getInit());
+
// Extend into the condition before we propagate through it below - this is
// needed to handle macros that generate the "if" but not the condition.
extendRegion(S->getCond());
@@ -931,16 +960,24 @@ struct CounterCoverageMappingBuilder
// propagate counts into them.
}
};
-}
-static bool isMachO(const CodeGenModule &CGM) {
+bool isMachO(const CodeGenModule &CGM) {
return CGM.getTarget().getTriple().isOSBinFormatMachO();
}
-static StringRef getCoverageSection(const CodeGenModule &CGM) {
+StringRef getCoverageSection(const CodeGenModule &CGM) {
return llvm::getInstrProfCoverageSectionName(isMachO(CGM));
}
+std::string normalizeFilename(StringRef Filename) {
+ llvm::SmallString<256> Path(Filename);
+ llvm::sys::fs::make_absolute(Path);
+ llvm::sys::path::remove_dots(Path, /*remove_dot_dots=*/true);
+ return Path.str().str();
+}
+
+} // end anonymous namespace
+
static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
ArrayRef<CounterExpression> Expressions,
ArrayRef<CounterMappingRegion> Regions) {
@@ -1002,10 +1039,15 @@ void CoverageMappingModuleGen::addFunctionMappingRecord(
std::vector<StringRef> Filenames;
std::vector<CounterExpression> Expressions;
std::vector<CounterMappingRegion> Regions;
+ llvm::SmallVector<std::string, 16> FilenameStrs;
llvm::SmallVector<StringRef, 16> FilenameRefs;
+ FilenameStrs.resize(FileEntries.size());
FilenameRefs.resize(FileEntries.size());
- for (const auto &Entry : FileEntries)
- FilenameRefs[Entry.second] = Entry.first->getName();
+ for (const auto &Entry : FileEntries) {
+ auto I = Entry.second;
+ FilenameStrs[I] = normalizeFilename(Entry.first->getName());
+ FilenameRefs[I] = FilenameStrs[I];
+ }
RawCoverageMappingReader Reader(CoverageMapping, FilenameRefs, Filenames,
Expressions, Regions);
if (Reader.read())
@@ -1026,11 +1068,8 @@ void CoverageMappingModuleGen::emit() {
FilenameStrs.resize(FileEntries.size());
FilenameRefs.resize(FileEntries.size());
for (const auto &Entry : FileEntries) {
- llvm::SmallString<256> Path(Entry.first->getName());
- llvm::sys::fs::make_absolute(Path);
-
auto I = Entry.second;
- FilenameStrs[I] = std::string(Path.begin(), Path.end());
+ FilenameStrs[I] = normalizeFilename(Entry.first->getName());
FilenameRefs[I] = FilenameStrs[I];
}
diff --git a/lib/CodeGen/CoverageMappingGen.h b/lib/CodeGen/CoverageMappingGen.h
index c202fe899343..b6789c2a79f1 100644
--- a/lib/CodeGen/CoverageMappingGen.h
+++ b/lib/CodeGen/CoverageMappingGen.h
@@ -19,7 +19,6 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Lex/PPCallbacks.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/lib/CodeGen/EHScopeStack.h b/lib/CodeGen/EHScopeStack.h
index 4717a667d2d2..243583038558 100644
--- a/lib/CodeGen/EHScopeStack.h
+++ b/lib/CodeGen/EHScopeStack.h
@@ -271,7 +271,7 @@ public:
/// Push a lazily-created cleanup on the stack.
template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) {
- static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ static_assert(alignof(T) <= ScopeStackAlignment,
"Cleanup's alignment is too large.");
void *Buffer = pushCleanup(Kind, sizeof(T));
Cleanup *Obj = new (Buffer) T(A...);
@@ -281,7 +281,7 @@ public:
/// Push a lazily-created cleanup on the stack. Tuple version.
template <class T, class... As>
void pushCleanupTuple(CleanupKind Kind, std::tuple<As...> A) {
- static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ static_assert(alignof(T) <= ScopeStackAlignment,
"Cleanup's alignment is too large.");
void *Buffer = pushCleanup(Kind, sizeof(T));
Cleanup *Obj = new (Buffer) T(std::move(A));
@@ -303,7 +303,7 @@ public:
/// stack is modified.
template <class T, class... As>
T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) {
- static_assert(llvm::AlignOf<T>::Alignment <= ScopeStackAlignment,
+ static_assert(alignof(T) <= ScopeStackAlignment,
"Cleanup's alignment is too large.");
void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
return new (Buffer) T(N, A...);
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
index 6051594fb001..b5d90ea59a49 100644
--- a/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -24,6 +24,7 @@
#include "CGVTables.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "TargetInfo.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/Type.h"
@@ -45,6 +46,7 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI {
protected:
bool UseARMMethodPtrABI;
bool UseARMGuardVarABI;
+ bool Use32BitVTableOffsetABI;
ItaniumMangleContext &getMangleContext() {
return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
@@ -55,7 +57,8 @@ public:
bool UseARMMethodPtrABI = false,
bool UseARMGuardVarABI = false) :
CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
- UseARMGuardVarABI(UseARMGuardVarABI) { }
+ UseARMGuardVarABI(UseARMGuardVarABI),
+ Use32BitVTableOffsetABI(false) { }
bool classifyReturnType(CGFunctionInfo &FI) const override;
@@ -112,7 +115,7 @@ public:
llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT) override;
- llvm::Value *
+ CGCallee
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
const Expr *E,
Address This,
@@ -168,8 +171,8 @@ public:
emitTerminateForUnexpectedException(CodeGenFunction &CGF,
llvm::Value *Exn) override;
- void EmitFundamentalRTTIDescriptor(QualType Type);
- void EmitFundamentalRTTIDescriptors();
+ void EmitFundamentalRTTIDescriptor(QualType Type, bool DLLExport);
+ void EmitFundamentalRTTIDescriptors(bool DLLExport);
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
CatchTypeInfo
getAddrOfCXXCatchHandlerType(QualType Ty,
@@ -261,9 +264,9 @@ public:
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
- llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
- Address This, llvm::Type *Ty,
- SourceLocation Loc) override;
+ CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
+ Address This, llvm::Type *Ty,
+ SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
@@ -363,11 +366,12 @@ public:
void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) override;
private:
- bool hasAnyUsedVirtualInlineFunction(const CXXRecordDecl *RD) const {
+ bool hasAnyVirtualInlineFunction(const CXXRecordDecl *RD) const {
const auto &VtableLayout =
CGM.getItaniumVTableContext().getVTableLayout(RD);
for (const auto &VtableComponent : VtableLayout.vtable_components()) {
+ // Skip empty slot.
if (!VtableComponent.isUsedFunctionPointerKind())
continue;
@@ -425,7 +429,9 @@ public:
class iOS64CXXABI : public ARMCXXABI {
public:
- iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
+ iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
+ Use32BitVTableOffsetABI = true;
+ }
// ARM64 libraries are prepared for non-unique RTTI.
bool shouldRTTIBeUnique() const override { return false; }
@@ -516,7 +522,7 @@ ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
///
/// If the member is non-virtual, memptr.ptr is the address of
/// the function to call.
-llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
+CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address ThisAddr,
llvm::Value *&ThisPtrForCall,
llvm::Value *MemFnPtr, const MemberPointerType *MPT) {
@@ -579,9 +585,15 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
// Apply the offset.
+ // On ARM64, to reserve extra space in virtual member function pointers,
+ // we only pay attention to the low 32 bits of the offset.
llvm::Value *VTableOffset = FnAsInt;
if (!UseARMMethodPtrABI)
VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
+ if (Use32BitVTableOffsetABI) {
+ VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
+ VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
+ }
VTable = Builder.CreateGEP(VTable, VTableOffset);
// Load the virtual function to call.
@@ -599,9 +611,11 @@ llvm::Value *ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// We're done.
CGF.EmitBlock(FnEnd);
- llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
- Callee->addIncoming(VirtualFn, FnVirtual);
- Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+ llvm::PHINode *CalleePtr = Builder.CreatePHI(FTy->getPointerTo(), 2);
+ CalleePtr->addIncoming(VirtualFn, FnVirtual);
+ CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual);
+
+ CGCallee Callee(FPT, CalleePtr);
return Callee;
}
@@ -1390,6 +1404,10 @@ void ItaniumCXXABI::addImplicitStructorParams(CodeGenFunction &CGF,
}
void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ // Naked functions have no prolog.
+ if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
+ return;
+
/// Initialize the 'this' slot.
EmitThisParam(CGF);
@@ -1434,15 +1452,18 @@ void ItaniumCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
llvm::Value *VTT = CGF.GetVTTParameter(GD, ForVirtualBase, Delegating);
QualType VTTTy = getContext().getPointerType(getContext().VoidPtrTy);
- llvm::Value *Callee = nullptr;
- if (getContext().getLangOpts().AppleKext)
+ CGCallee Callee;
+ if (getContext().getLangOpts().AppleKext &&
+ Type != Dtor_Base && DD->isVirtual())
Callee = CGF.BuildAppleKextVirtualDestructorCall(DD, Type, DD->getParent());
-
- if (!Callee)
- Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
+ else
+ Callee =
+ CGCallee::forDirect(CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
+ DD);
CGF.EmitCXXMemberOrOperatorCall(DD, Callee, ReturnValueSlot(),
- This.getPointer(), VTT, VTTTy, nullptr);
+ This.getPointer(), VTT, VTTTy,
+ nullptr, nullptr);
}
void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -1458,10 +1479,10 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
CGM.GetAddrOfRTTIDescriptor(CGM.getContext().getTagDeclType(RD));
// Create and set the initializer.
- llvm::Constant *Init = CGVT.CreateVTableInitializer(
- RD, VTLayout.vtable_component_begin(), VTLayout.getNumVTableComponents(),
- VTLayout.vtable_thunk_begin(), VTLayout.getNumVTableThunks(), RTTI);
- VTable->setInitializer(Init);
+ ConstantInitBuilder Builder(CGM);
+ auto Components = Builder.beginStruct();
+ CGVT.createVTableInitializer(Components, VTLayout, RTTI);
+ Components.finishAndSetAsInitializer(VTable);
// Set the correct linkage.
VTable->setLinkage(Linkage);
@@ -1487,7 +1508,7 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
isa<NamespaceDecl>(DC) && cast<NamespaceDecl>(DC)->getIdentifier() &&
cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
DC->getParent()->isTranslationUnit())
- EmitFundamentalRTTIDescriptors();
+ EmitFundamentalRTTIDescriptors(RD->hasAttr<DLLExportAttr>());
if (!VTable->isDeclarationForLinker())
CGM.EmitVTableTypeMetadata(VTable, VTLayout);
@@ -1517,17 +1538,21 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
const CXXRecordDecl *VTableClass) {
llvm::GlobalValue *VTable = getAddrOfVTable(VTableClass, CharUnits());
- // Find the appropriate vtable within the vtable group.
- uint64_t AddressPoint = CGM.getItaniumVTableContext()
- .getVTableLayout(VTableClass)
- .getAddressPoint(Base);
+ // Find the appropriate vtable within the vtable group, and the address point
+ // within that vtable.
+ VTableLayout::AddressPointLocation AddressPoint =
+ CGM.getItaniumVTableContext()
+ .getVTableLayout(VTableClass)
+ .getAddressPoint(Base);
llvm::Value *Indices[] = {
llvm::ConstantInt::get(CGM.Int32Ty, 0),
- llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint)
+ llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.VTableIndex),
+ llvm::ConstantInt::get(CGM.Int32Ty, AddressPoint.AddressPointIndex),
};
- return llvm::ConstantExpr::getInBoundsGetElementPtr(VTable->getValueType(),
- VTable, Indices);
+ return llvm::ConstantExpr::getGetElementPtr(VTable->getValueType(), VTable,
+ Indices, /*InBounds=*/true,
+ /*InRangeIndex=*/1);
}
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
@@ -1569,12 +1594,12 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
llvm::raw_svector_ostream Out(Name);
getMangleContext().mangleCXXVTable(RD, Out);
- ItaniumVTableContext &VTContext = CGM.getItaniumVTableContext();
- llvm::ArrayType *ArrayType = llvm::ArrayType::get(
- CGM.Int8PtrTy, VTContext.getVTableLayout(RD).getNumVTableComponents());
+ const VTableLayout &VTLayout =
+ CGM.getItaniumVTableContext().getVTableLayout(RD);
+ llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
VTable = CGM.CreateOrReplaceCXXRuntimeVariable(
- Name, ArrayType, llvm::GlobalValue::ExternalLinkage);
+ Name, VTableType, llvm::GlobalValue::ExternalLinkage);
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
if (RD->hasAttr<DLLImportAttr>())
@@ -1585,19 +1610,20 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
return VTable;
}
-llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
- GlobalDecl GD,
- Address This,
- llvm::Type *Ty,
- SourceLocation Loc) {
+CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ Address This,
+ llvm::Type *Ty,
+ SourceLocation Loc) {
GD = GD.getCanonicalDecl();
Ty = Ty->getPointerTo()->getPointerTo();
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable = CGF.GetVTablePtr(This, Ty, MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
+ llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
- return CGF.EmitVTableTypeCheckedLoad(
+ VFunc = CGF.EmitVTableTypeCheckedLoad(
MethodDecl->getParent(), VTable,
VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
} else {
@@ -1605,8 +1631,26 @@ llvm::Value *ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
- return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
- }
+ auto *VFuncLoad =
+ CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+
+ // Add !invariant.load metadata to the virtual function load to indicate
+ // that the function's entry in the vtable never changes once the vtable
+ // has been initialized.
+ // It is safe to add this even without -fstrict-vtable-pointers, but it
+ // would only help devirtualization when two loads of the same virtual
+ // function come from the same vtable load, which does not happen unless
+ // devirtualization is enabled with -fstrict-vtable-pointers.
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGM.getCodeGenOpts().StrictVTablePointers)
+ VFuncLoad->setMetadata(
+ llvm::LLVMContext::MD_invariant_load,
+ llvm::MDNode::get(CGM.getLLVMContext(),
+ llvm::ArrayRef<llvm::Metadata *>()));
+ VFunc = VFuncLoad;
+ }
+
+ CGCallee Callee(MethodDecl, VFunc);
+ return Callee;
}
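A minimal C++ sketch of the pattern this metadata enables (assuming -O2 and -fstrict-vtable-pointers; the type and function names are illustrative):

struct Base {
  virtual int f() const { return 1; }
};

// Both calls load f's slot from the same vtable load; with !invariant.load
// on the slot loads, the optimizer may fold them into a single load.
int callTwice(const Base &B) {
  return B.f() + B.f();
}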
llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
@@ -1618,13 +1662,13 @@ llvm::Value *ItaniumCXXABI::EmitVirtualDestructorCall(
const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
Dtor, getFromDtorType(DtorType));
llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
- llvm::Value *Callee =
+ CGCallee Callee =
getVirtualFunctionPointer(CGF, GlobalDecl(Dtor, DtorType), This, Ty,
CE ? CE->getLocStart() : SourceLocation());
CGF.EmitCXXMemberOrOperatorCall(Dtor, Callee, ReturnValueSlot(),
This.getPointer(), /*ImplicitParam=*/nullptr,
- QualType(), CE);
+ QualType(), CE, nullptr);
return nullptr;
}
@@ -1644,7 +1688,7 @@ bool ItaniumCXXABI::canSpeculativelyEmitVTable(const CXXRecordDecl *RD) const {
// then we are safe to emit an available_externally copy of the vtable.
// FIXME: we can still emit a copy of the vtable if we
// can emit a definition of the inline functions.
- return !hasAnyUsedVirtualInlineFunction(RD) && !isVTableHidden(RD);
+ return !hasAnyVirtualInlineFunction(RD) && !isVTableHidden(RD);
}
static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
Address InitialPtr,
@@ -2342,8 +2386,7 @@ LValue ItaniumCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
llvm::Function *Wrapper = getOrCreateThreadLocalWrapper(VD, Val);
llvm::CallInst *CallVal = CGF.Builder.CreateCall(Wrapper);
- if (isThreadWrapperReplaceable(VD, CGF.CGM))
- CallVal->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
+ CallVal->setCallingConv(Wrapper->getCallingConv());
LValue LV;
if (VD->getType()->isReferenceType())
@@ -2436,7 +2479,13 @@ public:
/// PTI_ContainingClassIncomplete - Containing class is incomplete.
/// (in pointer to member).
- PTI_ContainingClassIncomplete = 0x10
+ PTI_ContainingClassIncomplete = 0x10,
+
+ /// PTI_TransactionSafe - Pointee is transaction_safe function (C++ TM TS).
+ // PTI_TransactionSafe = 0x20,
+
+ /// PTI_Noexcept - Pointee is noexcept function (C++1z).
+ PTI_Noexcept = 0x40,
};
// VMI type info flags.
@@ -2460,7 +2509,9 @@ public:
/// BuildTypeInfo - Build the RTTI type info struct for the given type.
///
/// \param Force - true to force the creation of this RTTI value
- llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
+ /// \param DLLExport - true to mark the RTTI value as DLLExport
+ llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false,
+ bool DLLExport = false);
};
}
@@ -2865,16 +2916,18 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
case VisibleNoLinkage:
case ExternalLinkage:
- if (!CGM.getLangOpts().RTTI) {
- // RTTI is not enabled, which means that this type info struct is going
- // to be used for exception handling. Give it linkonce_odr linkage.
+ // If RTTI is not enabled, this type info struct is going to be used for
+ // exception handling. Give it linkonce_odr linkage.
+ if (!CGM.getLangOpts().RTTI)
return llvm::GlobalValue::LinkOnceODRLinkage;
- }
if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
if (RD->hasAttr<WeakAttr>())
return llvm::GlobalValue::WeakODRLinkage;
+ if (CGM.getTriple().isWindowsItaniumEnvironment())
+ if (RD->hasAttr<DLLImportAttr>())
+ return llvm::GlobalValue::ExternalLinkage;
if (RD->isDynamicClass()) {
llvm::GlobalValue::LinkageTypes LT = CGM.getVTableLinkage(RD);
// MinGW won't export the RTTI information when there is a key function.
@@ -2892,7 +2945,8 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(CodeGenModule &CGM,
llvm_unreachable("Invalid linkage!");
}
-llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
+llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force,
+ bool DLLExport) {
// We want to operate on the canonical type.
Ty = Ty.getCanonicalType();
@@ -3075,25 +3129,28 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
llvmVisibility = llvm::GlobalValue::HiddenVisibility;
else
llvmVisibility = CodeGenModule::GetLLVMVisibility(Ty->getVisibility());
+
TypeName->setVisibility(llvmVisibility);
GV->setVisibility(llvmVisibility);
- return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
-}
-
-/// ComputeQualifierFlags - Compute the pointer type info flags from the
-/// given qualifier.
-static unsigned ComputeQualifierFlags(Qualifiers Quals) {
- unsigned Flags = 0;
-
- if (Quals.hasConst())
- Flags |= ItaniumRTTIBuilder::PTI_Const;
- if (Quals.hasVolatile())
- Flags |= ItaniumRTTIBuilder::PTI_Volatile;
- if (Quals.hasRestrict())
- Flags |= ItaniumRTTIBuilder::PTI_Restrict;
+ if (CGM.getTriple().isWindowsItaniumEnvironment()) {
+ auto RD = Ty->getAsCXXRecordDecl();
+ if (DLLExport || (RD && RD->hasAttr<DLLExportAttr>())) {
+ TypeName->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ } else if (CGM.getLangOpts().RTTI && RD && RD->hasAttr<DLLImportAttr>()) {
+ TypeName->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+
+ // Because the typename and the typeinfo are DLL import, convert them to
+ // declarations rather than definitions. The initializers still need to
+ // be constructed to calculate the type for the declarations.
+ TypeName->setInitializer(nullptr);
+ GV->setInitializer(nullptr);
+ }
+ }
- return Flags;
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
}
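As a hedged illustration (assuming a *-windows-itanium target; the class name is invented), a dllimport'ed dynamic class now references its RTTI rather than defining it locally:

#include <typeinfo>

struct __declspec(dllimport) S {
  virtual ~S();
};

// References the type_info and type-name objects as dllimport declarations;
// the defining DLL provides the actual definitions.
const std::type_info &typeOf(const S &Obj) { return typeid(Obj); }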
/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
@@ -3214,9 +3271,6 @@ void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
if (!RD->getNumBases())
return;
- llvm::Type *LongLTy =
- CGM.getTypes().ConvertType(CGM.getContext().LongTy);
-
// Now add the base class descriptions.
// Itanium C++ ABI 2.9.5p6c:
@@ -3234,6 +3288,19 @@ void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
// __offset_shift = 8
// };
// };
+
+ // If we're in mingw and 'long' isn't wide enough for a pointer, use 'long
+ // long' instead of 'long' for __offset_flags. libstdc++abi uses long long on
+ // LLP64 platforms.
+ // FIXME: Consider updating libc++abi to match, and extend this logic to all
+ // LLP64 platforms.
+ QualType OffsetFlagsTy = CGM.getContext().LongTy;
+ const TargetInfo &TI = CGM.getContext().getTargetInfo();
+ if (TI.getTriple().isOSCygMing() && TI.getPointerWidth(0) > TI.getLongWidth())
+ OffsetFlagsTy = CGM.getContext().LongLongTy;
+ llvm::Type *OffsetFlagsLTy =
+ CGM.getTypes().ConvertType(OffsetFlagsTy);
+
for (const auto &Base : RD->bases()) {
// The __base_type member points to the RTTI for the base type.
Fields.push_back(ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(Base.getType()));
@@ -3265,27 +3332,48 @@ void ItaniumRTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
if (Base.getAccessSpecifier() == AS_public)
OffsetFlags |= BCTI_Public;
- Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
+ Fields.push_back(llvm::ConstantInt::get(OffsetFlagsLTy, OffsetFlags));
+ }
+}
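The width mismatch is easy to demonstrate at the source level; a small standalone sketch (assuming an LLP64 target such as x86_64-w64-mingw32):

#include <cstdio>

int main() {
  // x86_64 MinGW prints "long: 4, void*: 8": pointers are wider than long,
  // so __offset_flags must be emitted as 'long long' there.
  std::printf("long: %zu, void*: %zu\n", sizeof(long), sizeof(void *));
}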
+
+/// Compute the flags for a __pbase_type_info, and remove the corresponding
+/// pieces from \p Type.
+static unsigned extractPBaseFlags(ASTContext &Ctx, QualType &Type) {
+ unsigned Flags = 0;
+
+ if (Type.isConstQualified())
+ Flags |= ItaniumRTTIBuilder::PTI_Const;
+ if (Type.isVolatileQualified())
+ Flags |= ItaniumRTTIBuilder::PTI_Volatile;
+ if (Type.isRestrictQualified())
+ Flags |= ItaniumRTTIBuilder::PTI_Restrict;
+ Type = Type.getUnqualifiedType();
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(Type))
+ Flags |= ItaniumRTTIBuilder::PTI_Incomplete;
+
+ if (auto *Proto = Type->getAs<FunctionProtoType>()) {
+ if (Proto->isNothrow(Ctx)) {
+ Flags |= ItaniumRTTIBuilder::PTI_Noexcept;
+ Type = Ctx.getFunctionType(
+ Proto->getReturnType(), Proto->getParamTypes(),
+ Proto->getExtProtoInfo().withExceptionSpec(EST_None));
+ }
}
+
+ return Flags;
}
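A hedged C++17 example of what the new PTI_Noexcept flag distinguishes; the alias names are invented:

#include <cstdio>
#include <typeinfo>

using Fn = void (*)();
using NoexceptFn = void (*)() noexcept;  // a distinct type since C++17

int main() {
  // Prints 0: the two __pointer_type_info objects differ in __flags, where
  // the noexcept variant sets PTI_Noexcept (0x40) and its __pointee refers
  // to the function type with the exception specification stripped.
  std::printf("%d\n", typeid(Fn) == typeid(NoexceptFn));
}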
/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
/// used for pointer types.
void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
- Qualifiers Quals;
- QualType UnqualifiedPointeeTy =
- CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
-
// Itanium C++ ABI 2.9.5p7:
// __flags is a flag word describing the cv-qualification and other
// attributes of the type pointed to
- unsigned Flags = ComputeQualifierFlags(Quals);
-
- // Itanium C++ ABI 2.9.5p7:
- // When the abi::__pbase_type_info is for a direct or indirect pointer to an
- // incomplete class type, the incomplete target type flag is set.
- if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
- Flags |= PTI_Incomplete;
+ unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
llvm::Type *UnsignedIntLTy =
CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
@@ -3295,7 +3383,7 @@ void ItaniumRTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
// __pointee is a pointer to the std::type_info derivation for the
// unqualified type being pointed to.
llvm::Constant *PointeeTypeInfo =
- ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
+ ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
Fields.push_back(PointeeTypeInfo);
}
@@ -3305,23 +3393,12 @@ void
ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
QualType PointeeTy = Ty->getPointeeType();
- Qualifiers Quals;
- QualType UnqualifiedPointeeTy =
- CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
-
// Itanium C++ ABI 2.9.5p7:
// __flags is a flag word describing the cv-qualification and other
// attributes of the type pointed to.
- unsigned Flags = ComputeQualifierFlags(Quals);
+ unsigned Flags = extractPBaseFlags(CGM.getContext(), PointeeTy);
const RecordType *ClassType = cast<RecordType>(Ty->getClass());
-
- // Itanium C++ ABI 2.9.5p7:
- // When the abi::__pbase_type_info is for a direct or indirect pointer to an
- // incomplete class type, the incomplete target type flag is set.
- if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
- Flags |= PTI_Incomplete;
-
if (IsIncompleteClassType(ClassType))
Flags |= PTI_ContainingClassIncomplete;
@@ -3333,7 +3410,7 @@ ItaniumRTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
// __pointee is a pointer to the std::type_info derivation for the
// unqualified type being pointed to.
llvm::Constant *PointeeTypeInfo =
- ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(UnqualifiedPointeeTy);
+ ItaniumRTTIBuilder(CXXABI).BuildTypeInfo(PointeeTy);
Fields.push_back(PointeeTypeInfo);
// Itanium C++ ABI 2.9.5p9:
@@ -3348,15 +3425,18 @@ llvm::Constant *ItaniumCXXABI::getAddrOfRTTIDescriptor(QualType Ty) {
return ItaniumRTTIBuilder(*this).BuildTypeInfo(Ty);
}
-void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type) {
+void ItaniumCXXABI::EmitFundamentalRTTIDescriptor(QualType Type,
+ bool DLLExport) {
QualType PointerType = getContext().getPointerType(Type);
QualType PointerTypeConst = getContext().getPointerType(Type.withConst());
- ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, true);
- ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, true);
- ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
+ ItaniumRTTIBuilder(*this).BuildTypeInfo(Type, /*Force=*/true, DLLExport);
+ ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerType, /*Force=*/true,
+ DLLExport);
+ ItaniumRTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, /*Force=*/true,
+ DLLExport);
}
-void ItaniumCXXABI::EmitFundamentalRTTIDescriptors() {
+void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(bool DLLExport) {
// Types added here must also be added to TypeInfoIsInStandardLibrary.
QualType FundamentalTypes[] = {
getContext().VoidTy, getContext().NullPtrTy,
@@ -3373,7 +3453,7 @@ void ItaniumCXXABI::EmitFundamentalRTTIDescriptors() {
getContext().Char16Ty, getContext().Char32Ty
};
for (const QualType &FundamentalType : FundamentalTypes)
- EmitFundamentalRTTIDescriptor(FundamentalType);
+ EmitFundamentalRTTIDescriptor(FundamentalType, DLLExport);
}
/// What sort of uniqueness rules should we use for the RTTI for the
@@ -3820,7 +3900,8 @@ static llvm::Constant *getClangCallTerminateFn(CodeGenModule &CGM) {
llvm::FunctionType *fnTy =
llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
llvm::Constant *fnRef =
- CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate");
+ CGM.CreateRuntimeFunction(fnTy, "__clang_call_terminate",
+ llvm::AttributeSet(), /*Local=*/true);
llvm::Function *fn = dyn_cast<llvm::Function>(fnRef);
if (fn && fn->empty()) {
diff --git a/lib/CodeGen/MicrosoftCXXABI.cpp b/lib/CodeGen/MicrosoftCXXABI.cpp
index 41cd53c2215f..38df455011e3 100644
--- a/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -19,6 +19,7 @@
#include "CGVTables.h"
#include "CodeGenModule.h"
#include "CodeGenTypes.h"
+#include "ConstantBuilder.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -96,9 +97,9 @@ public:
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
- for (const VPtrInfo *VBT : *VBGlobals.VBTables) {
+ for (const std::unique_ptr<VPtrInfo> &VBT : *VBGlobals.VBTables) {
const ASTRecordLayout &SubobjectLayout =
- Context.getASTRecordLayout(VBT->BaseWithVPtr);
+ Context.getASTRecordLayout(VBT->IntroducingObject);
CharUnits Offs = VBT->NonVirtualOffset;
Offs += SubobjectLayout.getVBPtrOffset();
if (VBT->getVBaseWithVPtr())
@@ -122,7 +123,7 @@ public:
void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
llvm::GlobalVariable *getMSCompleteObjectLocator(const CXXRecordDecl *RD,
- const VPtrInfo *Info);
+ const VPtrInfo &Info);
llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) override;
CatchTypeInfo
@@ -164,6 +165,9 @@ public:
llvm::BasicBlock *
EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD) override;
+
+ llvm::BasicBlock *
+ EmitDtorCompleteObjectHandler(CodeGenFunction &CGF);
void initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
const CXXRecordDecl *RD) override;
@@ -254,7 +258,7 @@ public:
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, Address This) override;
- void emitVTableTypeMetadata(VPtrInfo *Info, const CXXRecordDecl *RD,
+ void emitVTableTypeMetadata(const VPtrInfo &Info, const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable);
void emitVTableDefinitions(CodeGenVTables &CGVT,
@@ -284,9 +288,9 @@ public:
llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
CharUnits VPtrOffset) override;
- llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
- Address This, llvm::Type *Ty,
- SourceLocation Loc) override;
+ CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF, GlobalDecl GD,
+ Address This, llvm::Type *Ty,
+ SourceLocation Loc) override;
llvm::Value *EmitVirtualDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *Dtor,
@@ -660,7 +664,7 @@ public:
CastKind CK, CastExpr::path_const_iterator PathBegin,
CastExpr::path_const_iterator PathEnd, llvm::Constant *Src);
- llvm::Value *
+ CGCallee
EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF, const Expr *E,
Address This, llvm::Value *&ThisPtrForCall,
llvm::Value *MemPtr,
@@ -794,6 +798,12 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
// FIXME: Implement for other architectures.
return RAA_Default;
+ case llvm::Triple::thumb:
+ // Use the simple Itanium rules for now.
+ // FIXME: This is incompatible with MSVC for arguments with a dtor and no
+ // copy ctor.
+ return !canCopyArgument(RD) ? RAA_Indirect : RAA_Default;
+
case llvm::Triple::x86:
// All record arguments are passed in memory on x86. Decide whether to
// construct the object directly in argument memory, or to construct the
@@ -824,25 +834,32 @@ MicrosoftCXXABI::getRecordArgABI(const CXXRecordDecl *RD) const {
getContext().getTypeSize(RD->getTypeForDecl()) > 64)
return RAA_Indirect;
- // We have a trivial copy constructor or no copy constructors, but we have
- // to make sure it isn't deleted.
- bool CopyDeleted = false;
+ // If this is true, the implicit copy constructor that Sema would have
+ // created would not be deleted. FIXME: We should provide a more direct way
+ // for CodeGen to ask whether the constructor was deleted.
+ if (!RD->hasUserDeclaredCopyConstructor() &&
+ !RD->hasUserDeclaredMoveConstructor() &&
+ !RD->needsOverloadResolutionForMoveConstructor() &&
+ !RD->hasUserDeclaredMoveAssignment() &&
+ !RD->needsOverloadResolutionForMoveAssignment())
+ return RAA_Default;
+
+ // Otherwise, Sema should have created an implicit copy constructor if
+ // needed.
+ assert(!RD->needsImplicitCopyConstructor());
+
+ // We have to make sure the trivial copy constructor isn't deleted.
for (const CXXConstructorDecl *CD : RD->ctors()) {
if (CD->isCopyConstructor()) {
assert(CD->isTrivial());
// We had at least one undeleted trivial copy ctor. Return directly.
if (!CD->isDeleted())
return RAA_Default;
- CopyDeleted = true;
}
}
// The trivial copy constructor was deleted. Return indirectly.
- if (CopyDeleted)
- return RAA_Indirect;
-
- // There were no copy ctors. Return in RAX.
- return RAA_Default;
+ return RAA_Indirect;
}
llvm_unreachable("invalid enum");
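A hedged sketch of the distinction the rewritten check draws (assuming the MSVC x86_64 ABI; the type names are invented):

struct Plain { int X; };   // implicit, non-deleted copy ctor: RAA_Default

struct MoveOnly {          // user-declared move ctor deletes the copy ctor,
  MoveOnly(MoveOnly &&);   // so arguments of this type go RAA_Indirect
};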
@@ -1121,6 +1138,25 @@ MicrosoftCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
return SkipVbaseCtorsBB;
}
+llvm::BasicBlock *
+MicrosoftCXXABI::EmitDtorCompleteObjectHandler(CodeGenFunction &CGF) {
+ llvm::Value *IsMostDerivedClass = getStructorImplicitParamValue(CGF);
+ assert(IsMostDerivedClass &&
+ "ctor for a class with virtual bases must have an implicit parameter");
+ llvm::Value *IsCompleteObject =
+ CGF.Builder.CreateIsNotNull(IsMostDerivedClass, "is_complete_object");
+
+ llvm::BasicBlock *CallVbaseDtorsBB = CGF.createBasicBlock("Dtor.dtor_vbases");
+ llvm::BasicBlock *SkipVbaseDtorsBB = CGF.createBasicBlock("Dtor.skip_vbases");
+ CGF.Builder.CreateCondBr(IsCompleteObject,
+ CallVbaseDtorsBB, SkipVbaseDtorsBB);
+
+ CGF.EmitBlock(CallVbaseDtorsBB);
+ // CGF will put the base dtor calls in this basic block for us later.
+
+ return SkipVbaseDtorsBB;
+}
+
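A hedged example of why the guard matters (MSVC ABI; names invented): the base and complete destructor share one body, gated by the is-most-derived flag, so base-subobject destruction must skip the virtual base.

struct VB { ~VB(); };
struct A : virtual VB {
  A();
  ~A();  // as a base-subobject dtor this must skip ~VB(); only the
};       // complete-object path destroys the virtual base
struct B : A {
  B();   // if B() throws after the A subobject is built, the cleanup calls
};       // ~A with the same is-most-derived flag the ctor received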
void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
CodeGenFunction &CGF, const CXXRecordDecl *RD) {
// In most cases, an override for a vbase virtual method can adjust
@@ -1208,10 +1244,10 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
- const VPtrInfo *VBT = (*VBGlobals.VBTables)[I];
+ const std::unique_ptr<VPtrInfo> &VBT = (*VBGlobals.VBTables)[I];
llvm::GlobalVariable *GV = VBGlobals.Globals[I];
const ASTRecordLayout &SubobjectLayout =
- Context.getASTRecordLayout(VBT->BaseWithVPtr);
+ Context.getASTRecordLayout(VBT->IntroducingObject);
CharUnits Offs = VBT->NonVirtualOffset;
Offs += SubobjectLayout.getVBPtrOffset();
if (VBT->getVBaseWithVPtr())
@@ -1220,7 +1256,7 @@ void MicrosoftCXXABI::EmitVBPtrStores(CodeGenFunction &CGF,
llvm::Value *GVPtr =
CGF.Builder.CreateConstInBoundsGEP2_32(GV->getValueType(), GV, 0, 0);
VBPtr = CGF.Builder.CreateElementBitCast(VBPtr, GVPtr->getType(),
- "vbptr." + VBT->ReusingBase->getName());
+ "vbptr." + VBT->ObjectWithVPtr->getName());
CGF.Builder.CreateStore(GVPtr, VBPtr);
}
}
@@ -1417,6 +1453,10 @@ llvm::Value *MicrosoftCXXABI::adjustThisParameterInVirtualFunctionPrologue(
}
void MicrosoftCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ // Naked functions have no prolog.
+ if (CGF.CurFuncDecl && CGF.CurFuncDecl->hasAttr<NakedAttr>())
+ return;
+
EmitThisParam(CGF);
/// If this is a function that the ABI specifies returns 'this', initialize
@@ -1484,7 +1524,9 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
const CXXDestructorDecl *DD,
CXXDtorType Type, bool ForVirtualBase,
bool Delegating, Address This) {
- llvm::Value *Callee = CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type));
+ CGCallee Callee = CGCallee::forDirect(
+ CGM.getAddrOfCXXStructor(DD, getFromDtorType(Type)),
+ DD);
if (DD->isVirtual()) {
assert(Type != CXXDtorType::Dtor_Deleting &&
@@ -1492,14 +1534,24 @@ void MicrosoftCXXABI::EmitDestructorCall(CodeGenFunction &CGF,
This = adjustThisArgumentForVirtualFunctionCall(CGF, GlobalDecl(DD, Type),
This, false);
}
+
+ llvm::BasicBlock *BaseDtorEndBB = nullptr;
+ if (ForVirtualBase && isa<CXXConstructorDecl>(CGF.CurCodeDecl)) {
+ BaseDtorEndBB = EmitDtorCompleteObjectHandler(CGF);
+ }
CGF.EmitCXXDestructorCall(DD, Callee, This.getPointer(),
/*ImplicitParam=*/nullptr,
/*ImplicitParamTy=*/QualType(), nullptr,
getFromDtorType(Type));
+ if (BaseDtorEndBB) {
+ // Complete object handler should continue to be the remaining block.
+ CGF.Builder.CreateBr(BaseDtorEndBB);
+ CGF.EmitBlock(BaseDtorEndBB);
+ }
}
-void MicrosoftCXXABI::emitVTableTypeMetadata(VPtrInfo *Info,
+void MicrosoftCXXABI::emitVTableTypeMetadata(const VPtrInfo &Info,
const CXXRecordDecl *RD,
llvm::GlobalVariable *VTable) {
if (!CGM.getCodeGenOpts().PrepareForLTO)
@@ -1514,20 +1566,20 @@ void MicrosoftCXXABI::emitVTableTypeMetadata(VPtrInfo *Info,
getContext().getTargetInfo().getPointerWidth(0))
: CharUnits::Zero();
- if (Info->PathToBaseWithVPtr.empty()) {
+ if (Info.PathToIntroducingObject.empty()) {
CGM.AddVTableTypeMetadata(VTable, AddressPoint, RD);
return;
}
// Add a bitset entry for the least derived base belonging to this vftable.
CGM.AddVTableTypeMetadata(VTable, AddressPoint,
- Info->PathToBaseWithVPtr.back());
+ Info.PathToIntroducingObject.back());
// Add a bitset entry for each derived class that is laid out at the same
// offset as the least derived base.
- for (unsigned I = Info->PathToBaseWithVPtr.size() - 1; I != 0; --I) {
- const CXXRecordDecl *DerivedRD = Info->PathToBaseWithVPtr[I - 1];
- const CXXRecordDecl *BaseRD = Info->PathToBaseWithVPtr[I];
+ for (unsigned I = Info.PathToIntroducingObject.size() - 1; I != 0; --I) {
+ const CXXRecordDecl *DerivedRD = Info.PathToIntroducingObject[I - 1];
+ const CXXRecordDecl *BaseRD = Info.PathToIntroducingObject[I];
const ASTRecordLayout &Layout =
getContext().getASTRecordLayout(DerivedRD);
@@ -1543,7 +1595,7 @@ void MicrosoftCXXABI::emitVTableTypeMetadata(VPtrInfo *Info,
}
// Finally do the same for the most derived class.
- if (Info->FullOffsetInMDC.isZero())
+ if (Info.FullOffsetInMDC.isZero())
CGM.AddVTableTypeMetadata(VTable, AddressPoint, RD);
}
@@ -1552,7 +1604,7 @@ void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
const VPtrInfoVector &VFPtrs = VFTContext.getVFPtrOffsets(RD);
- for (VPtrInfo *Info : VFPtrs) {
+ for (const std::unique_ptr<VPtrInfo> &Info : VFPtrs) {
llvm::GlobalVariable *VTable = getAddrOfVTable(RD, Info->FullOffsetInMDC);
if (VTable->hasInitializer())
continue;
@@ -1563,16 +1615,14 @@ void MicrosoftCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
llvm::Constant *RTTI = nullptr;
if (any_of(VTLayout.vtable_components(),
[](const VTableComponent &VTC) { return VTC.isRTTIKind(); }))
- RTTI = getMSCompleteObjectLocator(RD, Info);
+ RTTI = getMSCompleteObjectLocator(RD, *Info);
- llvm::Constant *Init = CGVT.CreateVTableInitializer(
- RD, VTLayout.vtable_component_begin(),
- VTLayout.getNumVTableComponents(), VTLayout.vtable_thunk_begin(),
- VTLayout.getNumVTableThunks(), RTTI);
+ ConstantInitBuilder Builder(CGM);
+ auto Components = Builder.beginStruct();
+ CGVT.createVTableInitializer(Components, VTLayout, RTTI);
+ Components.finishAndSetAsInitializer(VTable);
- VTable->setInitializer(Init);
-
- emitVTableTypeMetadata(Info, RD, VTable);
+ emitVTableTypeMetadata(*Info, RD, VTable);
}
}
@@ -1593,10 +1643,10 @@ llvm::Value *MicrosoftCXXABI::getVTableAddressPointInStructor(
}
static void mangleVFTableName(MicrosoftMangleContext &MangleContext,
- const CXXRecordDecl *RD, const VPtrInfo *VFPtr,
+ const CXXRecordDecl *RD, const VPtrInfo &VFPtr,
SmallString<256> &Name) {
llvm::raw_svector_ostream Out(Name);
- MangleContext.mangleCXXVFTable(RD, VFPtr->MangledPath, Out);
+ MangleContext.mangleCXXVFTable(RD, VFPtr.MangledPath, Out);
}
llvm::Constant *
@@ -1643,25 +1693,25 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
llvm::StringSet<> ObservedMangledNames;
for (size_t J = 0, F = VFPtrs.size(); J != F; ++J) {
SmallString<256> Name;
- mangleVFTableName(getMangleContext(), RD, VFPtrs[J], Name);
+ mangleVFTableName(getMangleContext(), RD, *VFPtrs[J], Name);
if (!ObservedMangledNames.insert(Name.str()).second)
llvm_unreachable("Already saw this mangling before?");
}
#endif
}
- VPtrInfo *const *VFPtrI =
- std::find_if(VFPtrs.begin(), VFPtrs.end(), [&](VPtrInfo *VPI) {
+ const std::unique_ptr<VPtrInfo> *VFPtrI = std::find_if(
+ VFPtrs.begin(), VFPtrs.end(), [&](const std::unique_ptr<VPtrInfo> &VPI) {
return VPI->FullOffsetInMDC == VPtrOffset;
});
if (VFPtrI == VFPtrs.end()) {
VFTablesMap[ID] = nullptr;
return nullptr;
}
- VPtrInfo *VFPtr = *VFPtrI;
+ const std::unique_ptr<VPtrInfo> &VFPtr = *VFPtrI;
SmallString<256> VFTableName;
- mangleVFTableName(getMangleContext(), RD, VFPtr, VFTableName);
+ mangleVFTableName(getMangleContext(), RD, *VFPtr, VFTableName);
// Classes marked __declspec(dllimport) need vftables generated on the
// import-side in order to support features like constexpr. No other
@@ -1689,16 +1739,14 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
return VTable;
}
- uint64_t NumVTableSlots =
- VTContext.getVFTableLayout(RD, VFPtr->FullOffsetInMDC)
- .getNumVTableComponents();
+ const VTableLayout &VTLayout =
+ VTContext.getVFTableLayout(RD, VFPtr->FullOffsetInMDC);
llvm::GlobalValue::LinkageTypes VTableLinkage =
VTableAliasIsRequred ? llvm::GlobalValue::PrivateLinkage : VFTableLinkage;
StringRef VTableName = VTableAliasIsRequred ? StringRef() : VFTableName.str();
- llvm::ArrayType *VTableType =
- llvm::ArrayType::get(CGM.Int8PtrTy, NumVTableSlots);
+ llvm::Type *VTableType = CGM.getVTables().getVTableType(VTLayout);
// Create a backing variable for the contents of VTable. The VTable may
// or may not include space for a pointer to RTTI data.
@@ -1719,8 +1767,9 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
// importing it. We never reference the RTTI data directly so there is no
// need to make room for it.
if (VTableAliasIsRequred) {
- llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.IntTy, 0),
- llvm::ConstantInt::get(CGM.IntTy, 1)};
+ llvm::Value *GEPIndices[] = {llvm::ConstantInt::get(CGM.Int32Ty, 0),
+ llvm::ConstantInt::get(CGM.Int32Ty, 0),
+ llvm::ConstantInt::get(CGM.Int32Ty, 1)};
// Create a GEP which points just after the first entry in the VFTable;
// this should be the location of the first virtual method.
llvm::Constant *VTableGEP = llvm::ConstantExpr::getInBoundsGetElementPtr(
@@ -1752,54 +1801,11 @@ llvm::GlobalVariable *MicrosoftCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
return VTable;
}
-// Compute the identity of the most derived class whose virtual table is located
-// at the given offset into RD.
-static const CXXRecordDecl *getClassAtVTableLocation(ASTContext &Ctx,
- const CXXRecordDecl *RD,
- CharUnits Offset) {
- if (Offset.isZero())
- return RD;
-
- const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
- const CXXRecordDecl *MaxBase = nullptr;
- CharUnits MaxBaseOffset;
- for (auto &&B : RD->bases()) {
- const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
- CharUnits BaseOffset = Layout.getBaseClassOffset(Base);
- if (BaseOffset <= Offset && BaseOffset >= MaxBaseOffset) {
- MaxBase = Base;
- MaxBaseOffset = BaseOffset;
- }
- }
- for (auto &&B : RD->vbases()) {
- const CXXRecordDecl *Base = B.getType()->getAsCXXRecordDecl();
- CharUnits BaseOffset = Layout.getVBaseClassOffset(Base);
- if (BaseOffset <= Offset && BaseOffset >= MaxBaseOffset) {
- MaxBase = Base;
- MaxBaseOffset = BaseOffset;
- }
- }
- assert(MaxBase);
- return getClassAtVTableLocation(Ctx, MaxBase, Offset - MaxBaseOffset);
-}
-
-// Compute the identity of the most derived class whose virtual table is located
-// at the MethodVFTableLocation ML.
-static const CXXRecordDecl *
-getClassAtVTableLocation(ASTContext &Ctx, GlobalDecl GD,
- MicrosoftVTableContext::MethodVFTableLocation &ML) {
- const CXXRecordDecl *RD = ML.VBase;
- if (!RD)
- RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
-
- return getClassAtVTableLocation(Ctx, RD, ML.VFPtrOffset);
-}
-
-llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
- GlobalDecl GD,
- Address This,
- llvm::Type *Ty,
- SourceLocation Loc) {
+CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ Address This,
+ llvm::Type *Ty,
+ SourceLocation Loc) {
GD = GD.getCanonicalDecl();
CGBuilderTy &Builder = CGF.Builder;
@@ -1810,22 +1816,38 @@ llvm::Value *MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty, MethodDecl->getParent());
+ MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
MicrosoftVTableContext::MethodVFTableLocation ML =
- CGM.getMicrosoftVTableContext().getMethodVFTableLocation(GD);
+ VFTContext.getMethodVFTableLocation(GD);
+
+ // Compute the identity of the most derived class whose virtual table is
+ // located at the MethodVFTableLocation ML.
+ auto getObjectWithVPtr = [&] {
+ return llvm::find_if(VFTContext.getVFPtrOffsets(
+ ML.VBase ? ML.VBase : MethodDecl->getParent()),
+ [&](const std::unique_ptr<VPtrInfo> &Info) {
+ return Info->FullOffsetInMDC == ML.VFPtrOffset;
+ })
+ ->get()
+ ->ObjectWithVPtr;
+ };
+ llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
- return CGF.EmitVTableTypeCheckedLoad(
- getClassAtVTableLocation(getContext(), GD, ML), VTable,
+ VFunc = CGF.EmitVTableTypeCheckedLoad(
+ getObjectWithVPtr(), VTable,
ML.Index * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
} else {
if (CGM.getCodeGenOpts().PrepareForLTO)
- CGF.EmitTypeMetadataCodeForVCall(
- getClassAtVTableLocation(getContext(), GD, ML), VTable, Loc);
+ CGF.EmitTypeMetadataCodeForVCall(getObjectWithVPtr(), VTable, Loc);
llvm::Value *VFuncPtr =
Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
- return Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+ VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
}
+
+ CGCallee Callee(MethodDecl, VFunc);
+ return Callee;
}
llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
@@ -1840,7 +1862,7 @@ llvm::Value *MicrosoftCXXABI::EmitVirtualDestructorCall(
const CGFunctionInfo *FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
Dtor, StructorType::Deleting);
llvm::Type *Ty = CGF.CGM.getTypes().GetFunctionType(*FInfo);
- llvm::Value *Callee = getVirtualFunctionPointer(
+ CGCallee Callee = getVirtualFunctionPointer(
CGF, GD, This, Ty, CE ? CE->getLocStart() : SourceLocation());
ASTContext &Context = getContext();
@@ -1956,7 +1978,7 @@ llvm::Function *MicrosoftCXXABI::EmitVirtualMemPtrThunk(
void MicrosoftCXXABI::emitVirtualInheritanceTables(const CXXRecordDecl *RD) {
const VBTableGlobals &VBGlobals = enumerateVBTables(RD);
for (unsigned I = 0, E = VBGlobals.VBTables->size(); I != E; ++I) {
- const VPtrInfo *VBT = (*VBGlobals.VBTables)[I];
+ const std::unique_ptr<VPtrInfo> &VBT = (*VBGlobals.VBTables)[I];
llvm::GlobalVariable *GV = VBGlobals.Globals[I];
if (GV->isDeclaration())
emitVBTableDefinition(*VBT, RD, GV);
@@ -1972,7 +1994,7 @@ MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
StringRef Name = OutName.str();
llvm::ArrayType *VBTableType =
- llvm::ArrayType::get(CGM.IntTy, 1 + VBT.ReusingBase->getNumVBases());
+ llvm::ArrayType::get(CGM.IntTy, 1 + VBT.ObjectWithVPtr->getNumVBases());
assert(!CGM.getModule().getNamedGlobal(Name) &&
"vbtable with this name already exists: mangling bug?");
@@ -1994,24 +2016,24 @@ MicrosoftCXXABI::getAddrOfVBTable(const VPtrInfo &VBT, const CXXRecordDecl *RD,
void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
const CXXRecordDecl *RD,
llvm::GlobalVariable *GV) const {
- const CXXRecordDecl *ReusingBase = VBT.ReusingBase;
+ const CXXRecordDecl *ObjectWithVPtr = VBT.ObjectWithVPtr;
- assert(RD->getNumVBases() && ReusingBase->getNumVBases() &&
+ assert(RD->getNumVBases() && ObjectWithVPtr->getNumVBases() &&
"should only emit vbtables for classes with vbtables");
const ASTRecordLayout &BaseLayout =
- getContext().getASTRecordLayout(VBT.BaseWithVPtr);
+ getContext().getASTRecordLayout(VBT.IntroducingObject);
const ASTRecordLayout &DerivedLayout = getContext().getASTRecordLayout(RD);
- SmallVector<llvm::Constant *, 4> Offsets(1 + ReusingBase->getNumVBases(),
+ SmallVector<llvm::Constant *, 4> Offsets(1 + ObjectWithVPtr->getNumVBases(),
nullptr);
- // The offset from ReusingBase's vbptr to itself always leads.
+ // The offset from ObjectWithVPtr's vbptr to itself always leads.
CharUnits VBPtrOffset = BaseLayout.getVBPtrOffset();
Offsets[0] = llvm::ConstantInt::get(CGM.IntTy, -VBPtrOffset.getQuantity());
MicrosoftVTableContext &Context = CGM.getMicrosoftVTableContext();
- for (const auto &I : ReusingBase->vbases()) {
+ for (const auto &I : ObjectWithVPtr->vbases()) {
const CXXRecordDecl *VBase = I.getType()->getAsCXXRecordDecl();
CharUnits Offset = DerivedLayout.getVBaseClassOffset(VBase);
assert(!Offset.isNegative());
@@ -2023,7 +2045,7 @@ void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
DerivedLayout.getVBaseClassOffset(VBT.getVBaseWithVPtr());
Offset -= CompleteVBPtrOffset;
- unsigned VBIndex = Context.getVBTableIndex(ReusingBase, VBase);
+ unsigned VBIndex = Context.getVBTableIndex(ObjectWithVPtr, VBase);
assert(Offsets[VBIndex] == nullptr && "The same vbindex seen twice?");
Offsets[VBIndex] = llvm::ConstantInt::get(CGM.IntTy, Offset.getQuantity());
}
@@ -2182,7 +2204,8 @@ static void emitGlobalDtorWithTLRegDtor(CodeGenFunction &CGF, const VarDecl &VD,
CGF.IntTy, DtorStub->getType(), /*IsVarArg=*/false);
llvm::Constant *TLRegDtor =
- CGF.CGM.CreateRuntimeFunction(TLRegDtorTy, "__tlregdtor");
+ CGF.CGM.CreateRuntimeFunction(TLRegDtorTy, "__tlregdtor",
+ llvm::AttributeSet(), /*Local=*/true);
if (llvm::Function *TLRegDtorFn = dyn_cast<llvm::Function>(TLRegDtor))
TLRegDtorFn->setDoesNotThrow();
@@ -2203,6 +2226,14 @@ void MicrosoftCXXABI::EmitThreadLocalInitFuncs(
CodeGenModule &CGM, ArrayRef<const VarDecl *> CXXThreadLocals,
ArrayRef<llvm::Function *> CXXThreadLocalInits,
ArrayRef<const VarDecl *> CXXThreadLocalInitVars) {
+ if (CXXThreadLocalInits.empty())
+ return;
+
+ CGM.AppendLinkerOptions(CGM.getTarget().getTriple().getArch() ==
+ llvm::Triple::x86
+ ? "/include:___dyn_tls_init@12"
+ : "/include:__dyn_tls_init");
+
// This will create a GV in the .CRT$XDU section. It will point to our
// initialization function. The CRT will call all of these function
// pointers at start-up time and, eventually, at thread-creation time.
@@ -2272,7 +2303,8 @@ static llvm::Constant *getInitThreadHeaderFn(CodeGenModule &CGM) {
FTy, "_Init_thread_header",
llvm::AttributeSet::get(CGM.getLLVMContext(),
llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoUnwind));
+ llvm::Attribute::NoUnwind),
+ /*Local=*/true);
}
static llvm::Constant *getInitThreadFooterFn(CodeGenModule &CGM) {
@@ -2283,7 +2315,8 @@ static llvm::Constant *getInitThreadFooterFn(CodeGenModule &CGM) {
FTy, "_Init_thread_footer",
llvm::AttributeSet::get(CGM.getLLVMContext(),
llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoUnwind));
+ llvm::Attribute::NoUnwind),
+ /*Local=*/true);
}
static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) {
@@ -2294,7 +2327,8 @@ static llvm::Constant *getInitThreadAbortFn(CodeGenModule &CGM) {
FTy, "_Init_thread_abort",
llvm::AttributeSet::get(CGM.getLLVMContext(),
llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoUnwind));
+ llvm::Attribute::NoUnwind),
+ /*Local=*/true);
}
namespace {
@@ -3222,7 +3256,7 @@ llvm::Constant *MicrosoftCXXABI::EmitMemberPointerConversion(
return Dst;
}
-llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
+CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address This,
llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
const MemberPointerType *MPT) {
@@ -3269,7 +3303,10 @@ llvm::Value *MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
"this.adjusted");
}
- return Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
+ FunctionPointer =
+ Builder.CreateBitCast(FunctionPointer, FTy->getPointerTo());
+ CGCallee Callee(FPT, FunctionPointer);
+ return Callee;
}
CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
@@ -3410,7 +3447,7 @@ struct MSRTTIBuilder {
llvm::GlobalVariable *
getBaseClassArray(SmallVectorImpl<MSRTTIClass> &Classes);
llvm::GlobalVariable *getClassHierarchyDescriptor();
- llvm::GlobalVariable *getCompleteObjectLocator(const VPtrInfo *Info);
+ llvm::GlobalVariable *getCompleteObjectLocator(const VPtrInfo &Info);
CodeGenModule &CGM;
ASTContext &Context;
@@ -3499,7 +3536,7 @@ llvm::GlobalVariable *MSRTTIBuilder::getClassHierarchyDescriptor() {
// Initialize the base class ClassHierarchyDescriptor.
llvm::Constant *Fields[] = {
- llvm::ConstantInt::get(CGM.IntTy, 0), // Unknown
+ llvm::ConstantInt::get(CGM.IntTy, 0), // reserved by the runtime
llvm::ConstantInt::get(CGM.IntTy, Flags),
llvm::ConstantInt::get(CGM.IntTy, Classes.size()),
ABI.getImageRelativeConstant(llvm::ConstantExpr::getInBoundsGetElementPtr(
@@ -3592,11 +3629,11 @@ MSRTTIBuilder::getBaseClassDescriptor(const MSRTTIClass &Class) {
}
llvm::GlobalVariable *
-MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo *Info) {
+MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo &Info) {
SmallString<256> MangledName;
{
llvm::raw_svector_ostream Out(MangledName);
- ABI.getMangleContext().mangleCXXRTTICompleteObjectLocator(RD, Info->MangledPath, Out);
+ ABI.getMangleContext().mangleCXXRTTICompleteObjectLocator(RD, Info.MangledPath, Out);
}
// Check to see if we've already computed this complete object locator.
@@ -3604,15 +3641,15 @@ MSRTTIBuilder::getCompleteObjectLocator(const VPtrInfo *Info) {
return COL;
// Compute the fields of the complete object locator.
- int OffsetToTop = Info->FullOffsetInMDC.getQuantity();
+ int OffsetToTop = Info.FullOffsetInMDC.getQuantity();
int VFPtrOffset = 0;
// The offset includes the vtordisp if one exists.
- if (const CXXRecordDecl *VBase = Info->getVBaseWithVPtr())
+ if (const CXXRecordDecl *VBase = Info.getVBaseWithVPtr())
if (Context.getASTRecordLayout(RD)
.getVBaseOffsetsMap()
.find(VBase)
->second.hasVtorDisp())
- VFPtrOffset = Info->NonVirtualOffset.getQuantity() + 4;
+ VFPtrOffset = Info.NonVirtualOffset.getQuantity() + 4;
// Forward-declare the complete object locator.
llvm::StructType *Type = ABI.getCompleteObjectLocatorType();
@@ -3740,7 +3777,7 @@ llvm::Constant *MicrosoftCXXABI::getAddrOfRTTIDescriptor(QualType Type) {
/// \brief Gets or a creates a Microsoft CompleteObjectLocator.
llvm::GlobalVariable *
MicrosoftCXXABI::getMSCompleteObjectLocator(const CXXRecordDecl *RD,
- const VPtrInfo *Info) {
+ const VPtrInfo &Info) {
return MSRTTIBuilder(*this, RD).getCompleteObjectLocator(Info);
}
@@ -3846,8 +3883,11 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
FunctionArgs.push_back(&IsMostDerived);
// Start defining the function.
+ auto NL = ApplyDebugLocation::CreateEmpty(CGF);
CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo,
FunctionArgs, CD->getLocation(), SourceLocation());
+ // Create a scope with an artificial location for the body of this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
EmitThisParam(CGF);
llvm::Value *This = getThisValue(CGF);
@@ -3865,11 +3905,11 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
Args.add(RValue::get(SrcVal), SrcParam.getType());
// Add the rest of the default arguments.
- std::vector<Stmt *> ArgVec;
- for (unsigned I = IsCopy ? 1 : 0, E = CD->getNumParams(); I != E; ++I) {
- Stmt *DefaultArg = getContext().getDefaultArgExprForConstructor(CD, I);
- assert(DefaultArg && "sema forgot to instantiate default args");
- ArgVec.push_back(DefaultArg);
+ SmallVector<const Stmt *, 4> ArgVec;
+ ArrayRef<ParmVarDecl *> params = CD->parameters().drop_front(IsCopy ? 1 : 0);
+ for (const ParmVarDecl *PD : params) {
+ assert(PD->hasDefaultArg() && "ctor closure lacks default args");
+ ArgVec.push_back(PD->getDefaultArg());
}
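For context, a hedged sketch of a constructor that needs such a closure (MSVC ABI, dllexport; names invented):

struct __declspec(dllexport) T {
  // The exported copy-ctor closure forwards Other and materializes X and Y
  // from these default arguments.
  T(const T &Other, int X = 1, int Y = 2);
};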
CodeGenFunction::RunCleanupsScope Cleanups(CGF);
@@ -3883,10 +3923,12 @@ MicrosoftCXXABI::getAddrOfCXXCtorClosure(const CXXConstructorDecl *CD,
/*Delegating=*/false, Args);
// Call the destructor with our arguments.
- llvm::Value *CalleeFn = CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
+ llvm::Constant *CalleePtr =
+ CGM.getAddrOfCXXStructor(CD, StructorType::Complete);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, CD);
const CGFunctionInfo &CalleeInfo = CGM.getTypes().arrangeCXXConstructorCall(
Args, CD, Ctor_Complete, ExtraArgs);
- CGF.EmitCall(CalleeInfo, CalleeFn, ReturnValueSlot(), Args, CD);
+ CGF.EmitCall(CalleeInfo, Callee, ReturnValueSlot(), Args);
Cleanups.ForceCleanup();
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 952d1627fa84..f925c2549175 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -112,7 +112,7 @@ namespace {
}
llvm::Constant *GetAddrOfGlobal(GlobalDecl global, bool isForDefinition) {
- return Builder->GetAddrOfGlobal(global, isForDefinition);
+ return Builder->GetAddrOfGlobal(global, ForDefinition_t(isForDefinition));
}
void Initialize(ASTContext &Context) override {
diff --git a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index de40e4121124..baf7811eedaf 100644
--- a/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -312,27 +312,30 @@ ObjectFilePCHContainerWriter::CreatePCHContainerGenerator(
CI, MainFileName, OutputFileName, std::move(OS), Buffer);
}
-void ObjectFilePCHContainerReader::ExtractPCH(
- llvm::MemoryBufferRef Buffer, llvm::BitstreamReader &StreamFile) const {
- if (auto OF = llvm::object::ObjectFile::createObjectFile(Buffer)) {
- auto *Obj = OF.get().get();
- bool IsCOFF = isa<llvm::object::COFFObjectFile>(Obj);
+StringRef
+ObjectFilePCHContainerReader::ExtractPCH(llvm::MemoryBufferRef Buffer) const {
+ StringRef PCH;
+ auto OFOrErr = llvm::object::ObjectFile::createObjectFile(Buffer);
+ if (OFOrErr) {
+ auto &OF = OFOrErr.get();
+ bool IsCOFF = isa<llvm::object::COFFObjectFile>(*OF);
// Find the clang AST section in the container.
- for (auto &Section : OF->get()->sections()) {
+ for (auto &Section : OF->sections()) {
StringRef Name;
Section.getName(Name);
- if ((!IsCOFF && Name == "__clangast") ||
- ( IsCOFF && Name == "clangast")) {
- StringRef Buf;
- Section.getContents(Buf);
- StreamFile.init((const unsigned char *)Buf.begin(),
- (const unsigned char *)Buf.end());
- return;
+ if ((!IsCOFF && Name == "__clangast") || (IsCOFF && Name == "clangast")) {
+ Section.getContents(PCH);
+ return PCH;
}
}
}
-
- // As a fallback, treat the buffer as a raw AST.
- StreamFile.init((const unsigned char *)Buffer.getBufferStart(),
- (const unsigned char *)Buffer.getBufferEnd());
+ handleAllErrors(OFOrErr.takeError(), [&](const llvm::ErrorInfoBase &EIB) {
+ if (EIB.convertToErrorCode() ==
+ llvm::object::object_error::invalid_file_type)
+ // As a fallback, treat the buffer as a raw AST.
+ PCH = Buffer.getBuffer();
+ else
+ EIB.log(llvm::errs());
+ });
+ return PCH;
}
diff --git a/lib/CodeGen/SanitizerMetadata.cpp b/lib/CodeGen/SanitizerMetadata.cpp
index 2a338bac4b41..9848e3e452f4 100644
--- a/lib/CodeGen/SanitizerMetadata.cpp
+++ b/lib/CodeGen/SanitizerMetadata.cpp
@@ -63,7 +63,13 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
std::string QualName;
llvm::raw_string_ostream OS(QualName);
D.printQualifiedName(OS);
- reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit);
+
+ bool IsBlacklisted = false;
+ for (auto Attr : D.specific_attrs<NoSanitizeAttr>())
+ if (Attr->getMask() & SanitizerKind::Address)
+ IsBlacklisted = true;
+ reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit,
+ IsBlacklisted);
}
void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
diff --git a/lib/CodeGen/SwiftCallingConv.cpp b/lib/CodeGen/SwiftCallingConv.cpp
index 6c20f8c9d3e9..0bfe30a32c80 100644
--- a/lib/CodeGen/SwiftCallingConv.cpp
+++ b/lib/CodeGen/SwiftCallingConv.cpp
@@ -384,7 +384,7 @@ void SwiftAggLowering::splitVectorEntry(unsigned index) {
auto eltTy = split.first;
CharUnits eltSize = getTypeStoreSize(CGM, eltTy);
auto numElts = split.second;
- Entries.insert(&Entries[index + 1], numElts - 1, StorageEntry());
+ Entries.insert(Entries.begin() + index + 1, numElts - 1, StorageEntry());
CharUnits begin = Entries[index].Begin;
for (unsigned i = 0; i != numElts; ++i) {
@@ -506,7 +506,7 @@ void SwiftAggLowering::enumerateComponents(EnumerationCallback callback) const {
assert(Finished && "haven't yet finished lowering");
for (auto &entry : Entries) {
- callback(entry.Begin, entry.Type);
+ callback(entry.Begin, entry.End, entry.Type);
}
}
@@ -828,3 +828,8 @@ void swiftcall::computeABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
argInfo.info = classifyArgumentType(CGM, argInfo.type);
}
}
+
+// Return true if the target ABI lowers swifterror to a register.
+bool swiftcall::isSwiftErrorLoweredInRegister(CodeGenModule &CGM) {
+ return getSwiftABIInfo(CGM).isSwiftErrorInRegister();
+}
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index aa67e71284ae..391eb53d2500 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -31,6 +31,31 @@
using namespace clang;
using namespace CodeGen;
+// Helper for coercing an aggregate argument or return value into an integer
+// array of the same size (including padding) and alignment. This alternate
+// coercion happens only for the RenderScript ABI and can be removed after
+// runtimes that rely on it are no longer supported.
+//
+// RenderScript assumes that the size of the argument / return value in the IR
+// is the same as the size of the corresponding qualified type. This helper
+// coerces the aggregate type into an array of the same size (including
+// padding). This coercion is used in lieu of expansion of struct members or
+// other canonical coercions that return a coerced type of larger size.
+//
+// Ty - The argument / return value type
+// Context - The associated ASTContext
+// LLVMContext - The associated LLVMContext
+static ABIArgInfo coerceToIntArray(QualType Ty,
+ ASTContext &Context,
+ llvm::LLVMContext &LLVMContext) {
+ // Alignment and Size are measured in bits.
+ const uint64_t Size = Context.getTypeSize(Ty);
+ const uint64_t Alignment = Context.getTypeAlign(Ty);
+ llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
+ const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
+}
+
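The rounding the helper performs is straightforward; a standalone sketch with illustrative values:

#include <cstdint>
#include <cstdio>

int main() {
  // E.g. struct { int A, B, C; }: 96 bits of data with 32-bit alignment.
  const uint64_t Size = 96, Alignment = 32;
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  // Prints "[3 x i32]", the integer-array coercion of the struct.
  std::printf("[%llu x i%llu]\n", (unsigned long long)NumElements,
              (unsigned long long)Alignment);
}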
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
llvm::Value *Array,
llvm::Value *Value,
@@ -375,6 +400,21 @@ TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
return llvm::CallingConv::C;
}
+
+llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
+ llvm::PointerType *T, QualType QT) const {
+ return llvm::ConstantPointerNull::get(T);
+}
+
+llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
+ CodeGen::CodeGenFunction &CGF, llvm::Value *Src, QualType SrcTy,
+ QualType DestTy) const {
+ // Since target may map different address spaces in AST to the same address
+ // space, an address space conversion may end up as a bitcast.
+ return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src,
+ CGF.ConvertType(DestTy));
+}
+
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff a the field is "empty", that is it
@@ -932,6 +972,11 @@ public:
// scalar registers.
return occupiesMoreThan(CGT, scalars, /*total*/ 3);
}
+
+ bool isSwiftErrorInRegister() const override {
+ // x86-32 lowering does not support passing swifterror in a register.
+ return false;
+ }
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -1203,7 +1248,8 @@ ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
const Type *Base = nullptr;
uint64_t NumElts = 0;
- if (State.CC == llvm::CallingConv::X86_VectorCall &&
+ if ((State.CC == llvm::CallingConv::X86_VectorCall ||
+ State.CC == llvm::CallingConv::X86_RegCall) &&
isHomogeneousAggregate(RetTy, Base, NumElts)) {
// The LLVM struct type for such an aggregate should lower properly.
return ABIArgInfo::getDirect();
@@ -1417,7 +1463,8 @@ bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
return true;
if (State.CC == llvm::CallingConv::X86_FastCall ||
- State.CC == llvm::CallingConv::X86_VectorCall) {
+ State.CC == llvm::CallingConv::X86_VectorCall ||
+ State.CC == llvm::CallingConv::X86_RegCall) {
if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
NeedsPadding = true;
@@ -1435,7 +1482,8 @@ bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
return false;
if (State.CC == llvm::CallingConv::X86_FastCall ||
- State.CC == llvm::CallingConv::X86_VectorCall) {
+ State.CC == llvm::CallingConv::X86_VectorCall ||
+ State.CC == llvm::CallingConv::X86_RegCall) {
if (getContext().getTypeSize(Ty) > 32)
return false;
@@ -1468,7 +1516,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
// to other targets.
const Type *Base = nullptr;
uint64_t NumElts = 0;
- if (State.CC == llvm::CallingConv::X86_VectorCall &&
+ if ((State.CC == llvm::CallingConv::X86_VectorCall ||
+ State.CC == llvm::CallingConv::X86_RegCall) &&
isHomogeneousAggregate(Ty, Base, NumElts)) {
if (State.FreeSSERegs >= NumElts) {
State.FreeSSERegs -= NumElts;
@@ -1514,7 +1563,8 @@ ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
(!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
return ABIArgInfo::getExpandWithPadding(
State.CC == llvm::CallingConv::X86_FastCall ||
- State.CC == llvm::CallingConv::X86_VectorCall,
+ State.CC == llvm::CallingConv::X86_VectorCall ||
+ State.CC == llvm::CallingConv::X86_RegCall,
PaddingType);
return getIndirectResult(Ty, true, State);
@@ -1565,7 +1615,10 @@ void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
State.FreeSSERegs = 6;
} else if (FI.getHasRegParm())
State.FreeRegs = FI.getRegParm();
- else
+ else if (State.CC == llvm::CallingConv::X86_RegCall) {
+ State.FreeRegs = 5;
+ State.FreeSSERegs = 8;
+ } else
State.FreeRegs = DefaultNumRegisterParameters;
if (!getCXXABI().classifyReturnType(FI)) {
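A hedged usage sketch of the convention these counts serve (assuming clang's regcall attribute on 32-bit x86; names invented): with 5 integer and 8 SSE registers available, a small homogeneous aggregate can travel entirely in registers.

struct Pair { double A, B; };  // homogeneous aggregate of two doubles

__attribute__((regcall)) Pair addPairs(Pair X, Pair Y) {
  return {X.A + Y.A, X.B + Y.B};  // operands and result can stay in XMM regs
}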
@@ -1906,12 +1959,16 @@ class X86_64ABIInfo : public SwiftABIInfo {
ABIArgInfo classifyReturnType(QualType RetTy) const;
- ABIArgInfo classifyArgumentType(QualType Ty,
- unsigned freeIntRegs,
- unsigned &neededInt,
- unsigned &neededSSE,
+ ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
+ unsigned &neededInt, unsigned &neededSSE,
bool isNamedArg) const;
+ ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE) const;
+
+ ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE) const;
+
bool IsIllegalVectorType(QualType Ty) const;
/// The 0.98 ABI revision clarified a lot of ambiguities,
@@ -1974,13 +2031,16 @@ public:
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
+ bool isSwiftErrorInRegister() const override {
+ return true;
+ }
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
-class WinX86_64ABIInfo : public ABIInfo {
+class WinX86_64ABIInfo : public SwiftABIInfo {
public:
WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
- : ABIInfo(CGT),
+ : SwiftABIInfo(CGT),
IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
void computeInfo(CGFunctionInfo &FI) const override;
@@ -1999,6 +2059,16 @@ public:
return isX86VectorCallAggregateSmallEnough(NumMembers);
}
+ bool shouldPassIndirectlyForSwift(CharUnits totalSize,
+ ArrayRef<llvm::Type *> scalars,
+ bool asReturnValue) const override {
+ return occupiesMoreThan(CGT, scalars, /*total*/ 4);
+ }
+
+ bool isSwiftErrorInRegister() const override {
+ return true;
+ }
+
private:
ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
bool IsReturnType) const;
@@ -2315,13 +2385,13 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Current = SSE;
} else if (k == BuiltinType::LongDouble) {
const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::IEEEquad) {
+ if (LDF == &llvm::APFloat::IEEEquad()) {
Lo = SSE;
Hi = SSEUp;
- } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
+ } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
Lo = X87;
Hi = X87Up;
- } else if (LDF == &llvm::APFloat::IEEEdouble) {
+ } else if (LDF == &llvm::APFloat::IEEEdouble()) {
Current = SSE;
} else
llvm_unreachable("unexpected long double representation!");
@@ -2440,11 +2510,11 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
Lo = Hi = SSE;
} else if (ET == getContext().LongDoubleTy) {
const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::IEEEquad)
+ if (LDF == &llvm::APFloat::IEEEquad())
Current = Memory;
- else if (LDF == &llvm::APFloat::x87DoubleExtended)
+ else if (LDF == &llvm::APFloat::x87DoubleExtended())
Current = ComplexX87;
- else if (LDF == &llvm::APFloat::IEEEdouble)
+ else if (LDF == &llvm::APFloat::IEEEdouble())
Lo = Hi = SSE;
else
llvm_unreachable("unexpected long double representation!");
@@ -2466,8 +2536,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than four eightbytes, ..., it has class MEMORY.
- if (Size > 256)
+ // than eight eightbytes, ..., it has class MEMORY.
+ if (Size > 512)
return;
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
@@ -2486,7 +2556,9 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// The only case a 256-bit wide vector could be used is when the array
// contains a single 256-bit element. Since Lo and Hi logic isn't extended
// to work for sizes wider than 128, early check and fallback to memory.
- if (Size > 128 && EltSize != 256)
+ //
+ if (Size > 128 &&
+ (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
return;
for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
@@ -2507,8 +2579,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Size = getContext().getTypeSize(Ty);
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
- // than four eightbytes, ..., it has class MEMORY.
- if (Size > 256)
+ // than eight eightbytes, ..., it has class MEMORY.
+ if (Size > 512)
return;
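A hedged illustration of the widened limit (assuming AVX-512 is enabled, e.g. -mavx512f; names invented): a struct whose single field fills all 512 bits now classifies as SSE/SSEUp instead of MEMORY.

typedef double V8D __attribute__((vector_size(64)));  // 512-bit vector

struct Wrapped { V8D V; };  // Size == EltSize == 512: no longer forced to
                            // MEMORY when the native vector width allows it

Wrapped passThrough(Wrapped W) { return W; }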
// AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
@@ -2561,6 +2633,10 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
bool BitField = i->isBitField();
+ // Ignore padding bit-fields.
+ if (BitField && i->isUnnamedBitfield())
+ continue;
+
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
// four eightbytes, or it contains unaligned fields, it has class MEMORY.
//
@@ -2568,7 +2644,8 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// contains a single 256-bit element. Since Lo and Hi logic isn't extended
// to work for sizes wider than 128, early check and fallback to memory.
//
- if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
+ if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
+ Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
Lo = Memory;
postMerge(Size, Lo, Hi);
return;
@@ -2592,10 +2669,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// structure to be passed in memory even if unaligned, and
// therefore they can straddle an eightbyte.
if (BitField) {
- // Ignore padding bit-fields.
- if (i->isUnnamedBitfield())
- continue;
-
+ assert(!i->isUnnamedBitfield());
uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
uint64_t Size = i->getBitWidthValue(getContext());
@@ -2723,7 +2797,7 @@ llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
// We couldn't find the preferred IR vector type for 'Ty'.
uint64_t Size = getContext().getTypeSize(Ty);
- assert((Size == 128 || Size == 256) && "Invalid type found!");
+ assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
// Return a LLVM IR vector type based on the size of 'Ty'.
return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
@@ -3247,22 +3321,94 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
return ABIArgInfo::getDirect(ResType);
}
+ABIArgInfo
+X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE) const {
+ auto RT = Ty->getAs<RecordType>();
+ assert(RT && "classifyRegCallStructType only valid with struct types");
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectReturnResult(Ty);
+
+ // Sum up bases
+ if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (CXXRD->isDynamicClass()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+
+ for (const auto &I : CXXRD->bases())
+ if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ }
+
+ // Sum up members
+ for (const auto *FD : RT->getDecl()->fields()) {
+ if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
+ if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ } else {
+ unsigned LocalNeededInt, LocalNeededSSE;
+ if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
+ LocalNeededSSE, true)
+ .isIndirect()) {
+ NeededInt = NeededSSE = 0;
+ return getIndirectReturnResult(Ty);
+ }
+ NeededInt += LocalNeededInt;
+ NeededSSE += LocalNeededSSE;
+ }
+ }
+
+ return ABIArgInfo::getDirect();
+}
+
+ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
+ unsigned &NeededInt,
+ unsigned &NeededSSE) const {
+
+ NeededInt = 0;
+ NeededSSE = 0;
+
+ return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
+}
+
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
- if (!getCXXABI().classifyReturnType(FI))
- FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
// Keep track of the number of assigned registers.
- unsigned freeIntRegs = 6, freeSSERegs = 8;
+ unsigned FreeIntRegs = IsRegCall ? 11 : 6;
+ unsigned FreeSSERegs = IsRegCall ? 16 : 8;
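+  // (__regcall widens the register budget to 11 integer and 16 SSE registers,
+  // versus the 6 and 8 of the standard SysV convention.)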
+ unsigned NeededInt, NeededSSE;
+
+ if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
+ !FI.getReturnType()->getTypePtr()->isUnionType()) {
+ FI.getReturnInfo() =
+ classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
+ if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
+ FreeIntRegs -= NeededInt;
+ FreeSSERegs -= NeededSSE;
+ } else {
+ FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
+ }
+ } else if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
// If the return value is indirect, then the hidden argument is consuming one
// integer register.
if (FI.getReturnInfo().isIndirect())
- --freeIntRegs;
+ --FreeIntRegs;
// The chain argument effectively gives us another free register.
if (FI.isChainCall())
- ++freeIntRegs;
+ ++FreeIntRegs;
unsigned NumRequiredArgs = FI.getNumRequiredArgs();
// AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
@@ -3272,19 +3418,21 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
it != ie; ++it, ++ArgNo) {
bool IsNamedArg = ArgNo < NumRequiredArgs;
- unsigned neededInt, neededSSE;
- it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
- neededSSE, IsNamedArg);
+ if (IsRegCall && it->type->isStructureOrClassType())
+ it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
+ else
+ it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
+ NeededSSE, IsNamedArg);
// AMD64-ABI 3.2.3p3: If there are no registers available for any
// eightbyte of an argument, the whole argument is passed on the
// stack. If registers have already been assigned for some
// eightbytes of such an argument, the assignments get reverted.
- if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
- freeIntRegs -= neededInt;
- freeSSERegs -= neededSSE;
+ if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
+ FreeIntRegs -= NeededInt;
+ FreeSSERegs -= NeededSSE;
} else {
- it->info = getIndirectResult(it->type, freeIntRegs);
+ it->info = getIndirectResult(it->type, FreeIntRegs);
}
}
}
@@ -3426,15 +3574,17 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
// Copy the first element.
- llvm::Value *V =
- CGF.Builder.CreateDefaultAlignedLoad(
- CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ // FIXME: Our choice of alignment here and below is probably pessimistic.
+ llvm::Value *V = CGF.Builder.CreateAlignedLoad(
+ TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
CGF.Builder.CreateStore(V,
CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
// Copy the second element.
- V = CGF.Builder.CreateDefaultAlignedLoad(
- CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ V = CGF.Builder.CreateAlignedLoad(
+ TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
+ CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
CharUnits Offset = CharUnits::fromQuantity(
getDataLayout().getStructLayout(ST)->getElementOffset(1));
CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
@@ -3597,7 +3747,7 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
// passes them indirectly through memory.
if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
- if (LDF == &llvm::APFloat::x87DoubleExtended)
+ if (LDF == &llvm::APFloat::x87DoubleExtended())
return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
}
@@ -3607,21 +3757,44 @@ ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
bool IsVectorCall =
FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
+ bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
+
+ unsigned FreeSSERegs = 0;
+ if (IsVectorCall) {
+ // We can use up to 4 SSE return registers with vectorcall.
+ FreeSSERegs = 4;
+ } else if (IsRegCall) {
+ // RegCall gives us 16 SSE registers.
+ FreeSSERegs = 16;
+ }
- // We can use up to 4 SSE return registers with vectorcall.
- unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
if (!getCXXABI().classifyReturnType(FI))
FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
- // We can use up to 6 SSE register parameters with vectorcall.
- FreeSSERegs = IsVectorCall ? 6 : 0;
+ if (IsVectorCall) {
+ // We can use up to 6 SSE register parameters with vectorcall.
+ FreeSSERegs = 6;
+ } else if (IsRegCall) {
+ FreeSSERegs = 16;
+ }
+
for (auto &I : FI.arguments())
I.info = classify(I.type, FreeSSERegs, false);
}
Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+
+ bool IsIndirect = false;
+
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
+ uint64_t Width = getContext().getTypeSize(Ty);
+ IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
+ }
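+  // Under this rule a 3-byte or 16-byte aggregate, for example, is read
+  // through a pointer stored in the va_list slot, while 1-, 2-, 4- and 8-byte
+  // values are read from the slot directly.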
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
CGF.getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(8),
/*allowHigherAlign*/ false);
@@ -3859,6 +4032,7 @@ private:
static const unsigned GPRBits = 64;
ABIKind Kind;
bool HasQPX;
+ bool IsSoftFloatABI;
// A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
// will be passed in a QPX register.
@@ -3889,8 +4063,10 @@ private:
}
public:
- PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
- : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
+ PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
+ bool SoftFloatABI)
+ : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
+ IsSoftFloatABI(SoftFloatABI) {}
bool isPromotableTypeForABI(QualType Ty) const;
CharUnits getParamTypeAlignment(QualType Ty) const;
@@ -3938,8 +4114,10 @@ class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
- PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
- : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}
+ PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
+ bool SoftFloatABI)
+ : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
+ SoftFloatABI)) {}
int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
// This is recovered from gcc output.
@@ -4157,8 +4335,11 @@ bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
if (BT->getKind() == BuiltinType::Float ||
BT->getKind() == BuiltinType::Double ||
- BT->getKind() == BuiltinType::LongDouble)
+ BT->getKind() == BuiltinType::LongDouble) {
+ if (IsSoftFloatABI)
+ return false;
return true;
+ }
}
if (const VectorType *VT = Ty->getAs<VectorType>()) {
if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
@@ -4373,14 +4554,17 @@ PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// 32-63: fp0-31, the 8-byte floating-point registers
AssignToArrayRange(Builder, Address, Eight8, 32, 63);
- // 64-76 are various 4-byte special-purpose registers:
+ // 64-67 are various 8-byte special-purpose registers:
// 64: mq
// 65: lr
// 66: ctr
// 67: ap
+ AssignToArrayRange(Builder, Address, Eight8, 64, 67);
+
+ // 68-76 are various 4-byte special-purpose registers:
// 68-75 cr0-7
// 76: xer
- AssignToArrayRange(Builder, Address, Four8, 64, 76);
+ AssignToArrayRange(Builder, Address, Four8, 68, 76);
// 77-108: v0-31, the 16-byte vector registers
AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
@@ -4390,7 +4574,10 @@ PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// 111: spe_acc
// 112: spefscr
// 113: sfp
- AssignToArrayRange(Builder, Address, Four8, 109, 113);
+ // 114: tfhar
+ // 115: tfiar
+ // 116: texasr
+ AssignToArrayRange(Builder, Address, Eight8, 109, 116);
return false;
}
@@ -4467,6 +4654,9 @@ private:
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
+ bool isSwiftErrorInRegister() const override {
+ return true;
+ }
};
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
@@ -4551,6 +4741,11 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
// Aggregates <= 16 bytes are passed directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 128) {
+    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
+    // the same size and alignment.
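+    // (E.g. a 12-byte aggregate with 4-byte alignment would presumably become
+    // [3 x i32].)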
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(Ty, getContext(), getVMContext());
+ }
unsigned Alignment = getContext().getTypeAlign(Ty);
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
@@ -4596,6 +4791,11 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
// Aggregates <= 16 bytes are returned directly in registers or on the stack.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 128) {
+    // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
+    // the same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(RetTy, getContext(), getVMContext());
+ }
unsigned Alignment = getContext().getTypeAlign(RetTy);
Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
@@ -5010,6 +5210,9 @@ private:
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
+ bool isSwiftErrorInRegister() const override {
+ return true;
+ }
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -5286,6 +5489,12 @@ ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
/*Realign=*/TyAlign > ABIAlign);
}
+  // On RenderScript, coerce aggregates <= 64 bytes to an integer array of
+  // the same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(Ty, getContext(), getVMContext());
+ }
+
// Otherwise, pass by coercing to a structure of the appropriate size.
llvm::Type* ElemTy;
unsigned SizeRegs;
@@ -5467,6 +5676,11 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
// are returned indirectly.
uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 32) {
+    // On RenderScript, coerce aggregates <= 4 bytes to an integer array of
+    // the same size and alignment.
+ if (getTarget().isRenderScriptTarget()) {
+ return coerceToIntArray(RetTy, getContext(), getVMContext());
+ }
if (getDataLayout().isBigEndian())
// Return in 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
@@ -5767,6 +5981,9 @@ public:
bool asReturnValue) const override {
return occupiesMoreThan(CGT, scalars, /*total*/ 4);
}
+ bool isSwiftErrorInRegister() const override {
+ return true;
+ }
};
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
@@ -6825,45 +7042,138 @@ public:
namespace {
+class AMDGPUABIInfo final : public DefaultABIInfo {
+public:
+ explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+};
+
+void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ unsigned CC = FI.getCallingConvention();
+ for (auto &Arg : FI.arguments())
+ if (CC == llvm::CallingConv::AMDGPU_KERNEL)
+ Arg.info = classifyArgumentType(Arg.type);
+ else
+ Arg.info = DefaultABIInfo::classifyArgumentType(Arg.type);
+}
+
+/// \brief Classify an argument of the given type \p Ty.
+ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty) const {
+ llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
+ if (!StrTy) {
+ return DefaultABIInfo::classifyArgumentType(Ty);
+ }
+
+  // Coerce single-element structs to their element.
+ if (StrTy->getNumElements() == 1) {
+ return ABIArgInfo::getDirect();
+ }
+
+ // If we set CanBeFlattened to true, CodeGen will expand the struct to its
+ // individual elements, which confuses the Clover OpenCL backend; therefore we
+ // have to set it to false here. Other args of getDirect() are just defaults.
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
+}
+
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const override;
unsigned getOpenCLKernelCallingConv() const override;
-};
+ llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
+ llvm::PointerType *T, QualType QT) const override;
+};
}
+static void appendOpenCLVersionMD(CodeGen::CodeGenModule &CGM);
+
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
- const Decl *D,
- llvm::GlobalValue *GV,
- CodeGen::CodeGenModule &M) const {
+ const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD)
return;
- if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
- llvm::Function *F = cast<llvm::Function>(GV);
- uint32_t NumVGPR = Attr->getNumVGPR();
- if (NumVGPR != 0)
- F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ if (const auto *Attr = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) {
+ unsigned Min = Attr->getMin();
+ unsigned Max = Attr->getMax();
+
+ if (Min != 0) {
+      assert(Min <= Max && "Min must be less than or equal to Max");
+
+ std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
+ F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
+ } else
+ assert(Max == 0 && "Max must be zero");
}
- if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
- llvm::Function *F = cast<llvm::Function>(GV);
+ if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
+ unsigned Min = Attr->getMin();
+ unsigned Max = Attr->getMax();
+
+ if (Min != 0) {
+      assert((Max == 0 || Min <= Max) &&
+             "Min must be less than or equal to Max");
+
+ std::string AttrVal = llvm::utostr(Min);
+ if (Max != 0)
+ AttrVal = AttrVal + "," + llvm::utostr(Max);
+ F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
+ } else
+ assert(Max == 0 && "Max must be zero");
+ }
+
+ if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
unsigned NumSGPR = Attr->getNumSGPR();
+
if (NumSGPR != 0)
- F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
+ F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
}
-}
+ if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
+ uint32_t NumVGPR = Attr->getNumVGPR();
+
+ if (NumVGPR != 0)
+ F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
+ }
+
+ appendOpenCLVersionMD(M);
+}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
return llvm::CallingConv::AMDGPU_KERNEL;
}
+// Currently LLVM assumes null pointers always have value 0, which results in
+// incorrectly transformed IR. Therefore, instead of emitting null pointers in
+// the private and local address spaces, a null pointer in the generic address
+// space is emitted and then cast to a pointer in the local or private address
+// space.
+llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
+ const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
+ QualType QT) const {
+ if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
+ return llvm::ConstantPointerNull::get(PT);
+
+ auto &Ctx = CGM.getContext();
+ auto NPT = llvm::PointerType::get(PT->getElementType(),
+ Ctx.getTargetAddressSpace(LangAS::opencl_generic));
+ return llvm::ConstantExpr::getAddrSpaceCast(
+ llvm::ConstantPointerNull::get(NPT), PT);
+}
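+// For example, the constant emitted for a null 'local' pointer is roughly:
+//   addrspacecast (i8 addrspace(<generic>)* null to i8 addrspace(<local>)*)
+// where the concrete address-space numbers are target-defined.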
+
//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
@@ -7303,7 +7613,7 @@ class FieldEncoding {
std::string Enc;
public:
FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
- StringRef str() {return Enc.c_str();}
+ StringRef str() { return Enc; }
bool operator<(const FieldEncoding &rhs) const {
if (HasName != rhs.HasName) return HasName;
return Enc < rhs.Enc;
@@ -7469,7 +7779,7 @@ StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
E.State = IncompleteUsed;
++IncompleteUsedCount;
}
- return E.Str.c_str();
+ return E.Str;
}
/// The XCore ABI includes a type information section that communicates symbol
@@ -7525,11 +7835,20 @@ void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
// SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
// opencl.spir.version named metadata.
llvm::Metadata *SPIRVerElts[] = {
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 2)),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 0))};
+ llvm::ConstantAsMetadata::get(
+ llvm::ConstantInt::get(Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ Int32Ty, (CGM.getLangOpts().OpenCLVersion / 100 > 1) ? 0 : 2))};
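+  // E.g. OpenCL 2.0 is recorded as SPIR version 2.0, while OpenCL 1.x is
+  // recorded as version 1.2.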
llvm::NamedMDNode *SPIRVerMD =
M.getOrInsertNamedMetadata("opencl.spir.version");
SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
+ appendOpenCLVersionMD(CGM);
+}
+
+static void appendOpenCLVersionMD(CodeGen::CodeGenModule &CGM) {
+ llvm::LLVMContext &Ctx = CGM.getModule().getContext();
+ llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
+ llvm::Module &M = CGM.getModule();
// SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
// opencl.ocl.version named metadata node.
llvm::Metadata *OCLVerElts[] = {
@@ -7882,10 +8201,6 @@ static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
// Driver code
//===----------------------------------------------------------------------===//
-const llvm::Triple &CodeGenModule::getTriple() const {
- return getTarget().getTriple();
-}
-
bool CodeGenModule::supportsCOMDAT() const {
return getTriple().supportsCOMDAT();
}
@@ -7964,8 +8279,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (getTarget().getABI() == "elfv2")
Kind = PPC64_SVR4_ABIInfo::ELFv2;
bool HasQPX = getTarget().getABI() == "elfv1-qpx";
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
- return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
+ return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
+ IsSoftFloat));
} else
return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
case llvm::Triple::ppc64le: {
@@ -7974,8 +8291,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
Kind = PPC64_SVR4_ABIInfo::ELFv1;
bool HasQPX = getTarget().getABI() == "elfv1-qpx";
+ bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";
- return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
+ return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
+ IsSoftFloat));
}
case llvm::Triple::nvptx:
@@ -7991,6 +8310,7 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
}
case llvm::Triple::tce:
+ case llvm::Triple::tcele:
return SetCGInfo(new TCETargetCodeGenInfo(Types));
case llvm::Triple::x86: {
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index e46382596af7..223d6d047af7 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -220,6 +220,22 @@ public:
/// Get LLVM calling convention for OpenCL kernel.
virtual unsigned getOpenCLKernelCallingConv() const;
+
+ /// Get target specific null pointer.
+ /// \param T is the LLVM type of the null pointer.
+ /// \param QT is the clang QualType of the null pointer.
+ /// \return ConstantPointerNull with the given type \p T.
+ /// Each target can override it to return its own desired constant value.
+ virtual llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
+ llvm::PointerType *T, QualType QT) const;
+
+ /// Perform address space cast of an expression of pointer type.
+ /// \param V is the LLVM value to be casted to another address space.
+ /// \param SrcTy is the QualType of \p V.
+ /// \param DestTy is the destination QualType.
+ virtual llvm::Value *performAddrSpaceCast(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *V, QualType SrcTy, QualType DestTy) const;
+
};
} // namespace CodeGen
diff --git a/lib/CodeGen/VarBypassDetector.cpp b/lib/CodeGen/VarBypassDetector.cpp
new file mode 100644
index 000000000000..cfb93d6a9fcc
--- /dev/null
+++ b/lib/CodeGen/VarBypassDetector.cpp
@@ -0,0 +1,168 @@
+//===--- VarBypassDetector.cpp - Bypass jumps detector ------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "VarBypassDetector.h"
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Stmt.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+/// Clear the object and pre-process the given statement, usually the
+/// function body.
+void VarBypassDetector::Init(const Stmt *Body) {
+ FromScopes.clear();
+ ToScopes.clear();
+ Bypasses.clear();
+ Scopes = {{~0U, nullptr}};
+ unsigned ParentScope = 0;
+ AlwaysBypassed = !BuildScopeInformation(Body, ParentScope);
+ if (!AlwaysBypassed)
+ Detect();
+}
+
+/// Build scope information for a declaration that is part of a DeclStmt.
+/// Returns false if we failed to build scope information and cannot tell
+/// which variables are bypassed.
+bool VarBypassDetector::BuildScopeInformation(const Decl *D,
+ unsigned &ParentScope) {
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD && VD->hasLocalStorage()) {
+ Scopes.push_back({ParentScope, VD});
+ ParentScope = Scopes.size() - 1;
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (const Expr *Init = VD->getInit())
+ return BuildScopeInformation(Init, ParentScope);
+
+ return true;
+}
+
+/// Walk through the statements, adding any labels or gotos to
+/// LabelAndGotoScopes and recursively walking the AST as needed.
+/// Returns false if we failed to build scope information and cannot tell
+/// which variables are bypassed.
+bool VarBypassDetector::BuildScopeInformation(const Stmt *S,
+ unsigned &origParentScope) {
+ // If this is a statement, rather than an expression, scopes within it don't
+ // propagate out into the enclosing scope. Otherwise we have to worry about
+ // block literals, which have the lifetime of their enclosing statement.
+ unsigned independentParentScope = origParentScope;
+ unsigned &ParentScope =
+ ((isa<Expr>(S) && !isa<StmtExpr>(S)) ? origParentScope
+ : independentParentScope);
+
+ unsigned StmtsToSkip = 0u;
+
+ switch (S->getStmtClass()) {
+ case Stmt::IndirectGotoStmtClass:
+ return false;
+
+ case Stmt::SwitchStmtClass:
+ if (const Stmt *Init = cast<SwitchStmt>(S)->getInit()) {
+ if (!BuildScopeInformation(Init, ParentScope))
+ return false;
+ ++StmtsToSkip;
+ }
+ if (const VarDecl *Var = cast<SwitchStmt>(S)->getConditionVariable()) {
+ if (!BuildScopeInformation(Var, ParentScope))
+ return false;
+ ++StmtsToSkip;
+ }
+ // Fall through
+
+ case Stmt::GotoStmtClass:
+ FromScopes.push_back({S, ParentScope});
+ break;
+
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ for (auto *I : DS->decls())
+ if (!BuildScopeInformation(I, origParentScope))
+ return false;
+ return true;
+ }
+
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::LabelStmtClass:
+    llvm_unreachable("the loop below handles labels and cases");
+ break;
+
+ default:
+ break;
+ }
+
+ for (const Stmt *SubStmt : S->children()) {
+ if (!SubStmt)
+ continue;
+ if (StmtsToSkip) {
+ --StmtsToSkip;
+ continue;
+ }
+
+ // Cases, labels, and defaults aren't "scope parents". It's also
+ // important to handle these iteratively instead of recursively in
+ // order to avoid blowing out the stack.
+ while (true) {
+ const Stmt *Next;
+ if (const SwitchCase *SC = dyn_cast<SwitchCase>(SubStmt))
+ Next = SC->getSubStmt();
+ else if (const LabelStmt *LS = dyn_cast<LabelStmt>(SubStmt))
+ Next = LS->getSubStmt();
+ else
+ break;
+
+ ToScopes[SubStmt] = ParentScope;
+ SubStmt = Next;
+ }
+
+ // Recursively walk the AST.
+ if (!BuildScopeInformation(SubStmt, ParentScope))
+ return false;
+ }
+ return true;
+}
+
+/// Checks each jump and stores each variable declaration they bypass.
+void VarBypassDetector::Detect() {
+ for (const auto &S : FromScopes) {
+ const Stmt *St = S.first;
+ unsigned from = S.second;
+ if (const GotoStmt *GS = dyn_cast<GotoStmt>(St)) {
+ if (const LabelStmt *LS = GS->getLabel()->getStmt())
+ Detect(from, ToScopes[LS]);
+ } else if (const SwitchStmt *SS = dyn_cast<SwitchStmt>(St)) {
+ for (const SwitchCase *SC = SS->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ Detect(from, ToScopes[SC]);
+ }
+ } else {
+ llvm_unreachable("goto or switch was expected");
+ }
+ }
+}
+
+/// Checks the jump and stores each variable declaration it bypasses.
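+/// It ascends the scope chains of both end points until they meet; every
+/// scope popped on the 'To' side is one the jump enters, so its variable is
+/// recorded as bypassed.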
+void VarBypassDetector::Detect(unsigned From, unsigned To) {
+ while (From != To) {
+ if (From < To) {
+ assert(Scopes[To].first < To);
+ const auto &ScopeTo = Scopes[To];
+ To = ScopeTo.first;
+ Bypasses.insert(ScopeTo.second);
+ } else {
+ assert(Scopes[From].first < From);
+ From = Scopes[From].first;
+ }
+ }
+}
diff --git a/lib/CodeGen/VarBypassDetector.h b/lib/CodeGen/VarBypassDetector.h
new file mode 100644
index 000000000000..f50baf4bab9f
--- /dev/null
+++ b/lib/CodeGen/VarBypassDetector.h
@@ -0,0 +1,70 @@
+//===--- VarBypassDetector.h - Bypass jumps detector --------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the VarBypassDetector class, which is used to detect
+// local variable declarations that can be bypassed by jumps.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_VARBYPASSDETECTOR_H
+#define LLVM_CLANG_LIB_CODEGEN_VARBYPASSDETECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+class Decl;
+class Stmt;
+class VarDecl;
+
+namespace CodeGen {
+
+/// The class detects jumps which bypass local variable declarations:
+/// goto L;
+/// int a;
+/// L:
+///
+/// This is a simplified version of JumpScopeChecker. Primary differences:
+/// * Detects only jumps into the scope of local variables.
+/// * Does not detect jumps out of the scope of local variables.
+/// * Is not limited to variables with initializers; JumpScopeChecker is.
+class VarBypassDetector {
+ // Scope information. Contains a parent scope and related variable
+ // declaration.
+ llvm::SmallVector<std::pair<unsigned, const VarDecl *>, 48> Scopes;
+ // List of jumps with scopes.
+ llvm::SmallVector<std::pair<const Stmt *, unsigned>, 16> FromScopes;
+ // Lookup map to find scope for destinations.
+ llvm::DenseMap<const Stmt *, unsigned> ToScopes;
+ // Set of variables which were bypassed by some jump.
+ llvm::DenseSet<const VarDecl *> Bypasses;
+  // If true, assume that all variables are bypassed.
+ bool AlwaysBypassed = false;
+
+public:
+ void Init(const Stmt *Body);
+
+  /// Returns true if the variable declaration was bypassed by any goto or
+  /// switch statement.
+ bool IsBypassed(const VarDecl *D) const {
+ return AlwaysBypassed || Bypasses.find(D) != Bypasses.end();
+ }
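+  /// Typical use: call Init() once with the function body, then query
+  /// IsBypassed() for each local variable of interest.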
+
+private:
+ bool BuildScopeInformation(const Decl *D, unsigned &ParentScope);
+ bool BuildScopeInformation(const Stmt *S, unsigned &origParentScope);
+ void Detect();
+ void Detect(unsigned From, unsigned To);
+};
+}
+}
+
+#endif
diff --git a/lib/Driver/Action.cpp b/lib/Driver/Action.cpp
index 29a46794d4b9..85e466a4409d 100644
--- a/lib/Driver/Action.cpp
+++ b/lib/Driver/Action.cpp
@@ -9,6 +9,7 @@
#include "clang/Driver/Action.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
@@ -36,6 +37,10 @@ const char *Action::getClassName(ActionClass AC) {
case DsymutilJobClass: return "dsymutil";
case VerifyDebugInfoJobClass: return "verify-debug-info";
case VerifyPCHJobClass: return "verify-pch";
+ case OffloadBundlingJobClass:
+ return "clang-offload-bundler";
+ case OffloadUnbundlingJobClass:
+ return "clang-offload-unbundler";
}
llvm_unreachable("invalid class");
@@ -45,6 +50,9 @@ void Action::propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch) {
// Offload actions set their own kinds on their dependences.
if (Kind == OffloadClass)
return;
+ // Unbundling actions use the host kinds.
+ if (Kind == OffloadUnbundlingJobClass)
+ return;
assert((OffloadingDeviceKind == OKind || OffloadingDeviceKind == OFK_None) &&
"Setting device kind to a different device??");
@@ -87,6 +95,8 @@ std::string Action::getOffloadingKindPrefix() const {
break;
case OFK_Cuda:
return "device-cuda";
+ case OFK_OpenMP:
+ return "device-openmp";
// TODO: Add other programming models here.
}
@@ -97,26 +107,49 @@ std::string Action::getOffloadingKindPrefix() const {
std::string Res("host");
if (ActiveOffloadKindMask & OFK_Cuda)
Res += "-cuda";
+ if (ActiveOffloadKindMask & OFK_OpenMP)
+ Res += "-openmp";
// TODO: Add other programming models here.
return Res;
}
+/// Return a string that can be used as a prefix in order to generate unique
+/// files for each offloading kind.
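+/// For example, kind OFK_Cuda with normalized triple "nvptx64-nvidia-cuda"
+/// produces the prefix "-cuda-nvptx64-nvidia-cuda".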
std::string
-Action::getOffloadingFileNamePrefix(llvm::StringRef NormalizedTriple) const {
- // A file prefix is only generated for device actions and consists of the
- // offload kind and triple.
- if (!OffloadingDeviceKind)
+Action::GetOffloadingFileNamePrefix(OffloadKind Kind,
+ llvm::StringRef NormalizedTriple,
+ bool CreatePrefixForHost) {
+ // Don't generate prefix for host actions unless required.
+ if (!CreatePrefixForHost && (Kind == OFK_None || Kind == OFK_Host))
return "";
std::string Res("-");
- Res += getOffloadingKindPrefix();
+ Res += GetOffloadKindName(Kind);
Res += "-";
Res += NormalizedTriple;
return Res;
}
+/// Return a string with the offload kind name. If that is not defined, we
+/// assume 'host'.
+llvm::StringRef Action::GetOffloadKindName(OffloadKind Kind) {
+ switch (Kind) {
+ case OFK_None:
+ case OFK_Host:
+ return "host";
+ case OFK_Cuda:
+ return "cuda";
+ case OFK_OpenMP:
+ return "openmp";
+
+ // TODO: Add other programming models here.
+ }
+
+ llvm_unreachable("invalid offload kind");
+}
+
void InputAction::anchor() {}
InputAction::InputAction(const Arg &_Input, types::ID _Type)
@@ -125,8 +158,8 @@ InputAction::InputAction(const Arg &_Input, types::ID _Type)
void BindArchAction::anchor() {}
-BindArchAction::BindArchAction(Action *Input, const char *_ArchName)
- : Action(BindArchClass, Input), ArchName(_ArchName) {}
+BindArchAction::BindArchAction(Action *Input, llvm::StringRef ArchName)
+ : Action(BindArchClass, Input), ArchName(ArchName) {}
void OffloadAction::anchor() {}
@@ -342,3 +375,13 @@ void VerifyPCHJobAction::anchor() {}
VerifyPCHJobAction::VerifyPCHJobAction(Action *Input, types::ID Type)
: VerifyJobAction(VerifyPCHJobClass, Input, Type) {}
+
+void OffloadBundlingJobAction::anchor() {}
+
+OffloadBundlingJobAction::OffloadBundlingJobAction(ActionList &Inputs)
+ : JobAction(OffloadBundlingJobClass, Inputs, Inputs.front()->getType()) {}
+
+void OffloadUnbundlingJobAction::anchor() {}
+
+OffloadUnbundlingJobAction::OffloadUnbundlingJobAction(Action *Input)
+ : JobAction(OffloadUnbundlingJobClass, Input, Input->getType()) {}
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 5b8422ed550c..3ebd1c4f891e 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -12,6 +12,7 @@ add_clang_library(clangDriver
Action.cpp
Compilation.cpp
CrossWindowsToolChain.cpp
+ Distro.cpp
Driver.cpp
DriverOptions.cpp
Job.cpp
diff --git a/lib/Driver/Compilation.cpp b/lib/Driver/Compilation.cpp
index 6a2616f0c2a4..5c13e59a0d73 100644
--- a/lib/Driver/Compilation.cpp
+++ b/lib/Driver/Compilation.cpp
@@ -37,11 +37,9 @@ Compilation::~Compilation() {
delete Args;
// Free any derived arg lists.
- for (llvm::DenseMap<std::pair<const ToolChain*, const char*>,
- DerivedArgList*>::iterator it = TCArgs.begin(),
- ie = TCArgs.end(); it != ie; ++it)
- if (it->second != TranslatedArgs)
- delete it->second;
+ for (auto Arg : TCArgs)
+ if (Arg.second != TranslatedArgs)
+ delete Arg.second;
// Free redirections of stdout/stderr.
if (Redirects) {
@@ -52,14 +50,15 @@ Compilation::~Compilation() {
}
}
-const DerivedArgList &Compilation::getArgsForToolChain(const ToolChain *TC,
- const char *BoundArch) {
+const DerivedArgList &
+Compilation::getArgsForToolChain(const ToolChain *TC, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) {
if (!TC)
TC = &DefaultToolChain;
- DerivedArgList *&Entry = TCArgs[std::make_pair(TC, BoundArch)];
+ DerivedArgList *&Entry = TCArgs[{TC, BoundArch, DeviceOffloadKind}];
if (!Entry) {
- Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch);
+ Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch, DeviceOffloadKind);
if (!Entry)
Entry = TranslatedArgs;
}
diff --git a/lib/Driver/CrossWindowsToolChain.cpp b/lib/Driver/CrossWindowsToolChain.cpp
index 4ebbc533232f..28036ea51cff 100644
--- a/lib/Driver/CrossWindowsToolChain.cpp
+++ b/lib/Driver/CrossWindowsToolChain.cpp
@@ -11,6 +11,7 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Path.h"
using namespace clang::driver;
using namespace clang::driver::toolchains;
diff --git a/lib/Driver/Distro.cpp b/lib/Driver/Distro.cpp
new file mode 100644
index 000000000000..d305b179449f
--- /dev/null
+++ b/lib/Driver/Distro.cpp
@@ -0,0 +1,134 @@
+//===--- Distro.cpp - Linux distribution detection support ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Distro.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang::driver;
+using namespace clang;
+
+static Distro::DistroType DetectDistro(vfs::FileSystem &VFS) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
+ VFS.getBufferForFile("/etc/lsb-release");
+ if (File) {
+ StringRef Data = File.get()->getBuffer();
+ SmallVector<StringRef, 16> Lines;
+ Data.split(Lines, "\n");
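+    // A typical /etc/lsb-release carries lines such as
+    // "DISTRIB_CODENAME=xenial"; only the DISTRIB_CODENAME key is inspected.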
+ Distro::DistroType Version = Distro::UnknownDistro;
+ for (StringRef Line : Lines)
+ if (Version == Distro::UnknownDistro && Line.startswith("DISTRIB_CODENAME="))
+ Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(17))
+ .Case("hardy", Distro::UbuntuHardy)
+ .Case("intrepid", Distro::UbuntuIntrepid)
+ .Case("jaunty", Distro::UbuntuJaunty)
+ .Case("karmic", Distro::UbuntuKarmic)
+ .Case("lucid", Distro::UbuntuLucid)
+ .Case("maverick", Distro::UbuntuMaverick)
+ .Case("natty", Distro::UbuntuNatty)
+ .Case("oneiric", Distro::UbuntuOneiric)
+ .Case("precise", Distro::UbuntuPrecise)
+ .Case("quantal", Distro::UbuntuQuantal)
+ .Case("raring", Distro::UbuntuRaring)
+ .Case("saucy", Distro::UbuntuSaucy)
+ .Case("trusty", Distro::UbuntuTrusty)
+ .Case("utopic", Distro::UbuntuUtopic)
+ .Case("vivid", Distro::UbuntuVivid)
+ .Case("wily", Distro::UbuntuWily)
+ .Case("xenial", Distro::UbuntuXenial)
+ .Case("yakkety", Distro::UbuntuYakkety)
+ .Case("zesty", Distro::UbuntuZesty)
+ .Default(Distro::UnknownDistro);
+ if (Version != Distro::UnknownDistro)
+ return Version;
+ }
+
+ File = VFS.getBufferForFile("/etc/redhat-release");
+ if (File) {
+ StringRef Data = File.get()->getBuffer();
+ if (Data.startswith("Fedora release"))
+ return Distro::Fedora;
+ if (Data.startswith("Red Hat Enterprise Linux") ||
+ Data.startswith("CentOS") ||
+ Data.startswith("Scientific Linux")) {
+ if (Data.find("release 7") != StringRef::npos)
+ return Distro::RHEL7;
+ else if (Data.find("release 6") != StringRef::npos)
+ return Distro::RHEL6;
+ else if (Data.find("release 5") != StringRef::npos)
+ return Distro::RHEL5;
+ }
+ return Distro::UnknownDistro;
+ }
+
+ File = VFS.getBufferForFile("/etc/debian_version");
+ if (File) {
+ StringRef Data = File.get()->getBuffer();
+    // Contents: <major.minor> or <codename/sid>
+ int MajorVersion;
+ if (!Data.split('.').first.getAsInteger(10, MajorVersion)) {
+ switch (MajorVersion) {
+ case 5:
+ return Distro::DebianLenny;
+ case 6:
+ return Distro::DebianSqueeze;
+ case 7:
+ return Distro::DebianWheezy;
+ case 8:
+ return Distro::DebianJessie;
+ case 9:
+ return Distro::DebianStretch;
+ default:
+ return Distro::UnknownDistro;
+ }
+ }
+ return llvm::StringSwitch<Distro::DistroType>(Data.split("\n").first)
+ .Case("squeeze/sid", Distro::DebianSqueeze)
+ .Case("wheezy/sid", Distro::DebianWheezy)
+ .Case("jessie/sid", Distro::DebianJessie)
+ .Case("stretch/sid", Distro::DebianStretch)
+ .Default(Distro::UnknownDistro);
+ }
+
+ File = VFS.getBufferForFile("/etc/SuSE-release");
+ if (File) {
+ StringRef Data = File.get()->getBuffer();
+ SmallVector<StringRef, 8> Lines;
+ Data.split(Lines, "\n");
+ for (const StringRef& Line : Lines) {
+ if (!Line.trim().startswith("VERSION"))
+ continue;
+ std::pair<StringRef, StringRef> SplitLine = Line.split('=');
+ // Old versions have split VERSION and PATCHLEVEL
+ // Newer versions use VERSION = x.y
+ std::pair<StringRef, StringRef> SplitVer = SplitLine.second.trim().split('.');
+ int Version;
+
+ // OpenSUSE/SLES 10 and older are not supported and not compatible
+ // with our rules, so just treat them as Distro::UnknownDistro.
+ if (!SplitVer.first.getAsInteger(10, Version) && Version > 10)
+ return Distro::OpenSUSE;
+ return Distro::UnknownDistro;
+ }
+ return Distro::UnknownDistro;
+ }
+
+ if (VFS.exists("/etc/exherbo-release"))
+ return Distro::Exherbo;
+
+ if (VFS.exists("/etc/arch-release"))
+ return Distro::ArchLinux;
+
+ return Distro::UnknownDistro;
+}
+
+Distro::Distro(vfs::FileSystem &VFS) : DistroVal(DetectDistro(VFS)) {}
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 02f4a9997711..7bd43ac9da2f 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -32,7 +32,6 @@
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
-#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -43,6 +42,9 @@
#include <map>
#include <memory>
#include <utility>
+#if LLVM_ON_UNIX
+#include <unistd.h> // getpid
+#endif
using namespace clang::driver;
using namespace clang;
@@ -55,12 +57,12 @@ Driver::Driver(StringRef ClangExecutable, StringRef DefaultTargetTriple,
Mode(GCCMode), SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone),
LTOMode(LTOK_None), ClangExecutable(ClangExecutable),
SysRoot(DEFAULT_SYSROOT), UseStdLib(true),
- DefaultTargetTriple(DefaultTargetTriple),
DriverTitle("clang LLVM compiler"), CCPrintOptionsFilename(nullptr),
CCPrintHeadersFilename(nullptr), CCLogDiagnosticsFilename(nullptr),
CCCPrintBindings(false), CCPrintHeaders(false), CCLogDiagnostics(false),
- CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true),
- CCCUsePCH(true), SuppressMissingInputWarning(false) {
+ CCGenDiagnostics(false), DefaultTargetTriple(DefaultTargetTriple),
+ CCCGenericGCCName(""), CheckInputsExist(true), CCCUsePCH(true),
+ SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
@@ -89,31 +91,39 @@ Driver::~Driver() {
llvm::DeleteContainerSeconds(ToolChains);
}
-void Driver::ParseDriverMode(ArrayRef<const char *> Args) {
- const std::string OptName =
- getOpts().getOption(options::OPT_driver_mode).getPrefixedName();
+void Driver::ParseDriverMode(StringRef ProgramName,
+ ArrayRef<const char *> Args) {
+ auto Default = ToolChain::getTargetAndModeFromProgramName(ProgramName);
+ StringRef DefaultMode(Default.second);
+ setDriverModeFromOption(DefaultMode);
for (const char *ArgPtr : Args) {
// Ignore nullptrs; they are response files' EOL markers
if (ArgPtr == nullptr)
continue;
const StringRef Arg = ArgPtr;
- if (!Arg.startswith(OptName))
- continue;
+ setDriverModeFromOption(Arg);
+ }
+}
- const StringRef Value = Arg.drop_front(OptName.size());
- const unsigned M = llvm::StringSwitch<unsigned>(Value)
- .Case("gcc", GCCMode)
- .Case("g++", GXXMode)
- .Case("cpp", CPPMode)
- .Case("cl", CLMode)
- .Default(~0U);
+void Driver::setDriverModeFromOption(StringRef Opt) {
+ const std::string OptName =
+ getOpts().getOption(options::OPT_driver_mode).getPrefixedName();
+ if (!Opt.startswith(OptName))
+ return;
+ StringRef Value = Opt.drop_front(OptName.size());
- if (M != ~0U)
- Mode = static_cast<DriverMode>(M);
- else
- Diag(diag::err_drv_unsupported_option_argument) << OptName << Value;
- }
+ const unsigned M = llvm::StringSwitch<unsigned>(Value)
+ .Case("gcc", GCCMode)
+ .Case("g++", GXXMode)
+ .Case("cpp", CPPMode)
+ .Case("cl", CLMode)
+ .Default(~0U);
+
+ if (M != ~0U)
+ Mode = static_cast<DriverMode>(M);
+ else
+ Diag(diag::err_drv_unsupported_option_argument) << OptName << Value;
}
InputArgList Driver::ParseArgStrings(ArrayRef<const char *> ArgStrings) {
@@ -170,6 +180,10 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
(PhaseArg = DAL.getLastArg(options::OPT__SLASH_P))) {
FinalPhase = phases::Preprocess;
+ // --precompile only runs up to precompilation.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT__precompile))) {
+ FinalPhase = phases::Precompile;
+
// -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
(PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
@@ -423,6 +437,32 @@ void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
}
}
+/// Compute the desired OpenMP runtime from the flags provided.
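+/// E.g. -fopenmp=libomp selects OMPRT_OMP; an unrecognized runtime name
+/// yields OMPRT_Unknown and a diagnostic.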
+Driver::OpenMPRuntimeKind Driver::getOpenMPRuntime(const ArgList &Args) const {
+ StringRef RuntimeName(CLANG_DEFAULT_OPENMP_RUNTIME);
+
+ const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ);
+ if (A)
+ RuntimeName = A->getValue();
+
+ auto RT = llvm::StringSwitch<OpenMPRuntimeKind>(RuntimeName)
+ .Case("libomp", OMPRT_OMP)
+ .Case("libgomp", OMPRT_GOMP)
+ .Case("libiomp5", OMPRT_IOMP5)
+ .Default(OMPRT_Unknown);
+
+ if (RT == OMPRT_Unknown) {
+ if (A)
+ Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << A->getValue();
+ else
+ // FIXME: We could use a nicer diagnostic here.
+ Diag(diag::err_drv_unsupported_opt) << "-fopenmp";
+ }
+
+ return RT;
+}
+
void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
InputList &Inputs) {
@@ -433,14 +473,71 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
if (llvm::any_of(Inputs, [](std::pair<types::ID, const llvm::opt::Arg *> &I) {
return types::isCuda(I.first);
})) {
- const ToolChain &TC = getToolChain(
- C.getInputArgs(),
- llvm::Triple(C.getSingleOffloadToolChain<Action::OFK_Host>()
- ->getTriple()
- .isArch64Bit()
- ? "nvptx64-nvidia-cuda"
- : "nvptx-nvidia-cuda"));
- C.addOffloadDeviceToolChain(&TC, Action::OFK_Cuda);
+ const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
+ const llvm::Triple &HostTriple = HostTC->getTriple();
+ llvm::Triple CudaTriple(HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda"
+ : "nvptx-nvidia-cuda");
+ // Use the CUDA and host triples as the key into the ToolChains map, because
+ // the device toolchain we create depends on both.
+ ToolChain *&CudaTC = ToolChains[CudaTriple.str() + "/" + HostTriple.str()];
+ if (!CudaTC) {
+ CudaTC = new toolchains::CudaToolChain(*this, CudaTriple, *HostTC,
+ C.getInputArgs());
+ }
+ C.addOffloadDeviceToolChain(CudaTC, Action::OFK_Cuda);
+ }
+
+ //
+ // OpenMP
+ //
+ // We need to generate an OpenMP toolchain if the user specified targets with
+ // the -fopenmp-targets option.
+ if (Arg *OpenMPTargets =
+ C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
+ if (OpenMPTargets->getNumValues()) {
+ // We expect that -fopenmp-targets is always used in conjunction with the
+ // option -fopenmp specifying a valid runtime with offloading support,
+ // i.e. libomp or libiomp.
+ bool HasValidOpenMPRuntime = C.getInputArgs().hasFlag(
+ options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false);
+ if (HasValidOpenMPRuntime) {
+ OpenMPRuntimeKind OpenMPKind = getOpenMPRuntime(C.getInputArgs());
+ HasValidOpenMPRuntime =
+ OpenMPKind == OMPRT_OMP || OpenMPKind == OMPRT_IOMP5;
+ }
+
+ if (HasValidOpenMPRuntime) {
+ llvm::StringMap<const char *> FoundNormalizedTriples;
+ for (const char *Val : OpenMPTargets->getValues()) {
+ llvm::Triple TT(Val);
+ std::string NormalizedName = TT.normalize();
+
+ // Make sure we don't have a duplicate triple.
+ auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
+ if (Duplicate != FoundNormalizedTriples.end()) {
+ Diag(clang::diag::warn_drv_omp_offload_target_duplicate)
+ << Val << Duplicate->second;
+ continue;
+ }
+
+ // Store the current triple so that we can check for duplicates in the
+ // following iterations.
+ FoundNormalizedTriples[NormalizedName] = Val;
+
+ // If the specified target is invalid, emit a diagnostic.
+ if (TT.getArch() == llvm::Triple::UnknownArch)
+ Diag(clang::diag::err_drv_invalid_omp_target) << Val;
+ else {
+ const ToolChain &TC = getToolChain(C.getInputArgs(), TT);
+ C.addOffloadDeviceToolChain(&TC, Action::OFK_OpenMP);
+ }
+ }
+ } else
+ Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
+ } else
+ Diag(clang::diag::warn_drv_empty_joined_argument)
+ << OpenMPTargets->getAsString(C.getInputArgs());
}
//
@@ -456,8 +553,9 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// FIXME: Handle environment options which affect driver behavior, somewhere
// (client?). GCC_EXEC_PREFIX, LPATH, CC_PRINT_OPTIONS.
- if (char *env = ::getenv("COMPILER_PATH")) {
- StringRef CompilerPath = env;
+ if (Optional<std::string> CompilerPathValue =
+ llvm::sys::Process::GetEnv("COMPILER_PATH")) {
+ StringRef CompilerPath = *CompilerPathValue;
while (!CompilerPath.empty()) {
std::pair<StringRef, StringRef> Split =
CompilerPath.split(llvm::sys::EnvPathSeparator);
@@ -468,7 +566,7 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// We look for the driver mode option early, because the mode can affect
// how other options are parsed.
- ParseDriverMode(ArgList.slice(1));
+ ParseDriverMode(ClangExecutable, ArgList.slice(1));
// FIXME: What are we going to do with -V and -b?
@@ -535,26 +633,20 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
setLTOMode(Args);
- // Ignore -fembed-bitcode options with LTO
- // since the output will be bitcode anyway.
- if (getLTOMode() == LTOK_None) {
- if (Arg *A = Args.getLastArg(options::OPT_fembed_bitcode_EQ)) {
- StringRef Name = A->getValue();
- unsigned Model = llvm::StringSwitch<unsigned>(Name)
- .Case("off", EmbedNone)
- .Case("all", EmbedBitcode)
- .Case("bitcode", EmbedBitcode)
- .Case("marker", EmbedMarker)
- .Default(~0U);
- if (Model == ~0U) {
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
- << Name;
- } else
- BitcodeEmbed = static_cast<BitcodeEmbedMode>(Model);
- }
- } else {
- // claim the bitcode option under LTO so no warning is issued.
- Args.ClaimAllArgs(options::OPT_fembed_bitcode_EQ);
+ // Process -fembed-bitcode= flags.
+ if (Arg *A = Args.getLastArg(options::OPT_fembed_bitcode_EQ)) {
+ StringRef Name = A->getValue();
+ unsigned Model = llvm::StringSwitch<unsigned>(Name)
+ .Case("off", EmbedNone)
+ .Case("all", EmbedBitcode)
+ .Case("bitcode", EmbedBitcode)
+ .Case("marker", EmbedMarker)
+ .Default(~0U);
+ if (Model == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << Name;
+ } else
+ BitcodeEmbed = static_cast<BitcodeEmbedMode>(Model);
}
std::unique_ptr<llvm::opt::InputArgList> UArgs =
@@ -610,6 +702,95 @@ static void printArgList(raw_ostream &OS, const llvm::opt::ArgList &Args) {
OS << '\n';
}
+bool Driver::getCrashDiagnosticFile(StringRef ReproCrashFilename,
+ SmallString<128> &CrashDiagDir) {
+ using namespace llvm::sys;
+ assert(llvm::Triple(llvm::sys::getProcessTriple()).isOSDarwin() &&
+ "Only knows about .crash files on Darwin");
+
+  // The .crash file can be found at ~/Library/Logs/DiagnosticReports/
+ // (or /Library/Logs/DiagnosticReports for root) and has the filename pattern
+ // clang-<VERSION>_<YYYY-MM-DD-HHMMSS>_<hostname>.crash.
+ path::home_directory(CrashDiagDir);
+ if (CrashDiagDir.startswith("/var/root"))
+ CrashDiagDir = "/";
+ path::append(CrashDiagDir, "Library/Logs/DiagnosticReports");
+ int PID =
+#if LLVM_ON_UNIX
+ getpid();
+#else
+ 0;
+#endif
+ std::error_code EC;
+ fs::file_status FileStatus;
+ TimePoint<> LastAccessTime;
+ SmallString<128> CrashFilePath;
+  // Look up the .crash files and get the one generated by a subprocess
+  // spawned by this driver invocation.
+ for (fs::directory_iterator File(CrashDiagDir, EC), FileEnd;
+ File != FileEnd && !EC; File.increment(EC)) {
+ StringRef FileName = path::filename(File->path());
+ if (!FileName.startswith(Name))
+ continue;
+ if (fs::status(File->path(), FileStatus))
+ continue;
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> CrashFile =
+ llvm::MemoryBuffer::getFile(File->path());
+ if (!CrashFile)
+ continue;
+ // The first line should start with "Process:", otherwise this isn't a real
+ // .crash file.
+ StringRef Data = CrashFile.get()->getBuffer();
+ if (!Data.startswith("Process:"))
+ continue;
+ // Parse parent process pid line, e.g: "Parent Process: clang-4.0 [79141]"
+ size_t ParentProcPos = Data.find("Parent Process:");
+ if (ParentProcPos == StringRef::npos)
+ continue;
+ size_t LineEnd = Data.find_first_of("\n", ParentProcPos);
+ if (LineEnd == StringRef::npos)
+ continue;
+ StringRef ParentProcess = Data.slice(ParentProcPos+15, LineEnd).trim();
+ int OpenBracket = -1, CloseBracket = -1;
+ for (size_t i = 0, e = ParentProcess.size(); i < e; ++i) {
+ if (ParentProcess[i] == '[')
+ OpenBracket = i;
+ if (ParentProcess[i] == ']')
+ CloseBracket = i;
+ }
+ // Extract the parent process PID from the .crash file and check whether
+ // it matches this driver invocation pid.
+ int CrashPID;
+ if (OpenBracket < 0 || CloseBracket < 0 ||
+ ParentProcess.slice(OpenBracket + 1, CloseBracket)
+ .getAsInteger(10, CrashPID) || CrashPID != PID) {
+ continue;
+ }
+
+ // Found a .crash file matching the driver pid. To avoid getting an older
+ // and misleading crash file, continue looking for the most recent.
+ // FIXME: the driver can dispatch multiple cc1 invocations, leading to
+  // multiple crashes pointing to the same parent process. Since the driver
+ // does not collect pid information for the dispatched invocation there's
+ // currently no way to distinguish among them.
+ const auto FileAccessTime = FileStatus.getLastModificationTime();
+ if (FileAccessTime > LastAccessTime) {
+ CrashFilePath.assign(File->path());
+ LastAccessTime = FileAccessTime;
+ }
+ }
+
+ // If found, copy it over to the location of other reproducer files.
+ if (!CrashFilePath.empty()) {
+ EC = fs::copy_file(CrashFilePath, ReproCrashFilename);
+ if (EC)
+ return false;
+ return true;
+ }
+
+ return false;
+}
+
// When clang crashes, produce diagnostic information including the fully
// preprocessed source file(s). Request that the developer attach the
// diagnostic information to a bug report.
@@ -737,8 +918,13 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
"Preprocessed source(s) and associated run script(s) are located at:";
SmallString<128> VFS;
+ SmallString<128> ReproCrashFilename;
for (const char *TempFile : TempFiles) {
Diag(clang::diag::note_drv_command_failed_diag_msg) << TempFile;
+ if (ReproCrashFilename.empty()) {
+ ReproCrashFilename = TempFile;
+ llvm::sys::path::replace_extension(ReproCrashFilename, ".crash");
+ }
if (StringRef(TempFile).endswith(".cache")) {
// In some cases (modules) we'll dump extra data to help with reproducing
// the crash into a directory next to the output.
@@ -766,6 +952,24 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
}
+ // On darwin, provide information about the .crash diagnostic report.
+ if (llvm::Triple(llvm::sys::getProcessTriple()).isOSDarwin()) {
+ SmallString<128> CrashDiagDir;
+ if (getCrashDiagnosticFile(ReproCrashFilename, CrashDiagDir)) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << ReproCrashFilename.str();
+ } else { // Suggest a directory for the user to look for .crash files.
+ llvm::sys::path::append(CrashDiagDir, Name);
+ CrashDiagDir += "_<YYYY-MM-DD-HHMMSS>_<hostname>.crash";
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Crash backtrace is located in";
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << CrashDiagDir.str();
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "(choose the .crash file that corresponds to your crash)";
+ }
+ }
+
for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file,
options::OPT_frewrite_map_file_EQ))
Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();
@@ -783,8 +987,7 @@ void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
return;
std::string TmpName = GetTemporaryPath("response", "txt");
- Cmd.setResponseFile(
- C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str())));
+ Cmd.setResponseFile(C.addTempFile(C.getArgs().MakeArgString(TmpName)));
}
int Driver::ExecuteCompilation(
@@ -982,7 +1185,15 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
}
if (C.getArgs().hasArg(options::OPT_print_libgcc_file_name)) {
- llvm::outs() << GetFilePath("libgcc.a", TC) << "\n";
+ ToolChain::RuntimeLibType RLT = TC.GetRuntimeLibType(C.getArgs());
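+ // Print the archive matching the effective runtime library: the
+ // compiler-rt builtins library when compiler-rt is selected, libgcc.a
+ // otherwise.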
+ switch (RLT) {
+ case ToolChain::RLT_CompilerRT:
+ llvm::outs() << TC.getCompilerRT(C.getArgs(), "builtins") << "\n";
+ break;
+ case ToolChain::RLT_Libgcc:
+ llvm::outs() << GetFilePath("libgcc.a", TC) << "\n";
+ break;
+ }
return false;
}
@@ -1388,132 +1599,745 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
}
}
-// For each unique --cuda-gpu-arch= argument creates a TY_CUDA_DEVICE
-// input action and then wraps each in CudaDeviceAction paired with
-// appropriate GPU arch name. In case of partial (i.e preprocessing
-// only) or device-only compilation, each device action is added to /p
-// Actions and /p Current is released. Otherwise the function creates
-// and returns a new CudaHostAction which wraps /p Current and device
-// side actions.
-static Action *buildCudaActions(Compilation &C, DerivedArgList &Args,
- const Arg *InputArg, Action *HostAction,
- ActionList &Actions) {
- Arg *PartialCompilationArg = Args.getLastArg(
- options::OPT_cuda_host_only, options::OPT_cuda_device_only,
- options::OPT_cuda_compile_host_device);
- bool CompileHostOnly =
- PartialCompilationArg &&
- PartialCompilationArg->getOption().matches(options::OPT_cuda_host_only);
- bool CompileDeviceOnly =
- PartialCompilationArg &&
- PartialCompilationArg->getOption().matches(options::OPT_cuda_device_only);
-
- if (CompileHostOnly) {
+namespace {
+/// Provides a convenient interface for different programming models to generate
+/// the required device actions.
+class OffloadingActionBuilder final {
+ /// Flag used to trace errors in the builder.
+ bool IsValid = false;
+
+ /// The compilation that is using this builder.
+ Compilation &C;
+
+ /// Map between an input argument and the offload kinds used to process it.
+ std::map<const Arg *, unsigned> InputArgToOffloadKindMap;
+
+ /// Builder interface. It doesn't build anything or keep any state.
+ class DeviceActionBuilder {
+ public:
+ typedef llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PhasesTy;
+
+ enum ActionBuilderReturnCode {
+ // The builder acted successfully on the current action.
+ ABRT_Success,
+ // The builder didn't have to act on the current action.
+ ABRT_Inactive,
+ // The builder was successful and requested the host action to not be
+ // generated.
+ ABRT_Ignore_Host,
+ };
+
+ protected:
+ /// Compilation associated with this builder.
+ Compilation &C;
+
+ /// Tool chains associated with this builder. The same programming
+ /// model may have associated one or more tool chains.
+ SmallVector<const ToolChain *, 2> ToolChains;
+
+ /// The derived arguments associated with this builder.
+ DerivedArgList &Args;
+
+ /// The inputs associated with this builder.
+ const Driver::InputList &Inputs;
+
+ /// The associated offload kind.
+ Action::OffloadKind AssociatedOffloadKind = Action::OFK_None;
+
+ public:
+ DeviceActionBuilder(Compilation &C, DerivedArgList &Args,
+ const Driver::InputList &Inputs,
+ Action::OffloadKind AssociatedOffloadKind)
+ : C(C), Args(Args), Inputs(Inputs),
+ AssociatedOffloadKind(AssociatedOffloadKind) {}
+ virtual ~DeviceActionBuilder() {}
+
+ /// Fill up \a DA with all the device dependences that should be added to
+ /// the host action for the current phase. By default the builder is
+ /// inactive.
+ virtual ActionBuilderReturnCode
+ getDeviceDependences(OffloadAction::DeviceDependences &DA,
+ phases::ID CurPhase, phases::ID FinalPhase,
+ PhasesTy &Phases) {
+ return ABRT_Inactive;
+ }
+
+ /// Update the state to include the provided host action \a HostAction as a
+ /// dependency of the current device action. By default it is inactive.
+ virtual ActionBuilderReturnCode addDeviceDependences(Action *HostAction) {
+ return ABRT_Inactive;
+ }
+
+ /// Append top level actions generated by the builder.
+ virtual void appendTopLevelActions(ActionList &AL) {}
+
+ /// Append linker actions generated by the builder.
+ virtual void appendLinkDependences(OffloadAction::DeviceDependences &DA) {}
+
+ /// Initialize the builder. Return true if any initialization errors are
+ /// found.
+ virtual bool initialize() { return false; }
+
+ /// Return true if the builder can use bundling/unbundling.
+ virtual bool canUseBundlerUnbundler() const { return false; }
+
+ /// Return true if this builder is valid. We have a valid builder if we have
+ /// associated device tool chains.
+ bool isValid() { return !ToolChains.empty(); }
+
+ /// Return the associated offload kind.
+ Action::OffloadKind getAssociatedOffloadKind() {
+ return AssociatedOffloadKind;
+ }
+ };
+
+ /// \brief CUDA action builder. It injects device code into the host backend
+ /// action.
+ class CudaActionBuilder final : public DeviceActionBuilder {
+ /// Flags to signal if the user requested host-only or device-only
+ /// compilation.
+ bool CompileHostOnly = false;
+ bool CompileDeviceOnly = false;
+
+ /// List of GPU architectures to use in this compilation.
+ SmallVector<CudaArch, 4> GpuArchList;
+
+ /// The CUDA actions for the current input.
+ ActionList CudaDeviceActions;
+
+ /// The CUDA fat binary if it was generated for the current input.
+ Action *CudaFatBinary = nullptr;
+
+ /// Flag that is set to true if this builder acted on the current input.
+ bool IsActive = false;
+
+ public:
+ CudaActionBuilder(Compilation &C, DerivedArgList &Args,
+ const Driver::InputList &Inputs)
+ : DeviceActionBuilder(C, Args, Inputs, Action::OFK_Cuda) {}
+
+ ActionBuilderReturnCode
+ getDeviceDependences(OffloadAction::DeviceDependences &DA,
+ phases::ID CurPhase, phases::ID FinalPhase,
+ PhasesTy &Phases) override {
+ if (!IsActive)
+ return ABRT_Inactive;
+
+ // If we don't have more CUDA actions, we don't have any dependences to
+ // create for the host.
+ if (CudaDeviceActions.empty())
+ return ABRT_Success;
+
+ assert(CudaDeviceActions.size() == GpuArchList.size() &&
+ "Expecting one action per GPU architecture.");
+ assert(!CompileHostOnly &&
+ "Not expecting CUDA actions in host-only compilation.");
+
+ // If we are generating code for the device or we are in a backend phase,
+ // we attempt to generate the fat binary. We compile each arch to ptx and
+ // assemble to cubin, then feed the cubin *and* the ptx into a device
+ // "link" action, which uses fatbinary to combine these cubins into one
+ // fatbin. The fatbin is then an input to the host action if not in
+ // device-only mode.
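+ // That is, for each arch the backend (PTX) and assemble (cubin) results
+ // are each wrapped in an OffloadAction, and all of these feed a single
+ // device LinkJobAction that produces the TY_CUDA_FATBIN.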
+ if (CompileDeviceOnly || CurPhase == phases::Backend) {
+ ActionList DeviceActions;
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ // Produce the device action from the current phase up to the assemble
+ // phase.
+ for (auto Ph : Phases) {
+ // Skip the phases that were already dealt with.
+ if (Ph < CurPhase)
+ continue;
+ // We have to be consistent with the host final phase.
+ if (Ph > FinalPhase)
+ break;
+
+ CudaDeviceActions[I] = C.getDriver().ConstructPhaseAction(
+ C, Args, Ph, CudaDeviceActions[I]);
+
+ if (Ph == phases::Assemble)
+ break;
+ }
+
+ // If we didn't reach the assemble phase, we can't generate the fat
+ // binary. We also don't need the fat binary in device-only mode, as the
+ // device actions are then emitted as top-level outputs.
+ if (!isa<AssembleJobAction>(CudaDeviceActions[I]) ||
+ CompileDeviceOnly)
+ continue;
+
+ Action *AssembleAction = CudaDeviceActions[I];
+ assert(AssembleAction->getType() == types::TY_Object);
+ assert(AssembleAction->getInputs().size() == 1);
+
+ Action *BackendAction = AssembleAction->getInputs()[0];
+ assert(BackendAction->getType() == types::TY_PP_Asm);
+
+ for (auto &A : {AssembleAction, BackendAction}) {
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*A, *ToolChains.front(), CudaArchToString(GpuArchList[I]),
+ Action::OFK_Cuda);
+ DeviceActions.push_back(
+ C.MakeAction<OffloadAction>(DDep, A->getType()));
+ }
+ }
+
+ // We generate the fat binary if we have device input actions.
+ if (!DeviceActions.empty()) {
+ CudaFatBinary =
+ C.MakeAction<LinkJobAction>(DeviceActions, types::TY_CUDA_FATBIN);
+
+ if (!CompileDeviceOnly) {
+ DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
+ Action::OFK_Cuda);
+ // Clear the fat binary, it is already a dependence of a host
+ // action.
+ CudaFatBinary = nullptr;
+ }
+
+ // Remove the CUDA actions as they are already connected to a host
+ // action or fat binary.
+ CudaDeviceActions.clear();
+ }
+
+ // We avoid creating a host action in device-only mode.
+ return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
+ } else if (CurPhase > phases::Backend) {
+ // If we are past the backend phase and still have a device action, we
+ // don't have to do anything as this action is already a device
+ // top-level action.
+ return ABRT_Success;
+ }
+
+ assert(CurPhase < phases::Backend &&
+ "Individual CUDA device actions should only be generated "
+ "before the backend phase!");
+
+ // By default, we produce an action for each device arch.
+ for (Action *&A : CudaDeviceActions)
+ A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A);
+
+ return ABRT_Success;
+ }
+
+ ActionBuilderReturnCode addDeviceDependences(Action *HostAction) override {
+ // While generating code for CUDA, we only depend on the host input action
+ // to trigger the creation of all the CUDA device actions.
+
+ // If we are dealing with an input action, replicate it for each GPU
+ // architecture. If we are in host-only mode we return 'success' so that
+ // the host uses the CUDA offload kind.
+ if (auto *IA = dyn_cast<InputAction>(HostAction)) {
+ assert(!GpuArchList.empty() &&
+ "We should have at least one GPU architecture.");
+
+ // If the host input is not CUDA, we don't need to bother with this
+ // input.
+ if (IA->getType() != types::TY_CUDA) {
+ // The builder will ignore this input.
+ IsActive = false;
+ return ABRT_Inactive;
+ }
+
+ // Set the flag to true, so that the builder acts on the current input.
+ IsActive = true;
+
+ if (CompileHostOnly)
+ return ABRT_Success;
+
+ // Replicate inputs for each GPU architecture.
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
+ CudaDeviceActions.push_back(C.MakeAction<InputAction>(
+ IA->getInputArg(), types::TY_CUDA_DEVICE));
+
+ return ABRT_Success;
+ }
+
+ return IsActive ? ABRT_Success : ABRT_Inactive;
+ }
+
+ void appendTopLevelActions(ActionList &AL) override {
+ // Utility to append actions to the top level list.
+ auto AddTopLevel = [&](Action *A, CudaArch BoundArch) {
+ OffloadAction::DeviceDependences Dep;
+ Dep.add(*A, *ToolChains.front(), CudaArchToString(BoundArch),
+ Action::OFK_Cuda);
+ AL.push_back(C.MakeAction<OffloadAction>(Dep, A->getType()));
+ };
+
+ // If we have a fat binary, add it to the list.
+ if (CudaFatBinary) {
+ AddTopLevel(CudaFatBinary, CudaArch::UNKNOWN);
+ CudaDeviceActions.clear();
+ CudaFatBinary = nullptr;
+ return;
+ }
+
+ if (CudaDeviceActions.empty())
+ return;
+
+ // If we have CUDA actions at this point, that's because we have a
+ // partial compilation, so we should have an action for each GPU
+ // architecture.
+ assert(CudaDeviceActions.size() == GpuArchList.size() &&
+ "Expecting one action per GPU architecture.");
+ assert(ToolChains.size() == 1 &&
+ "Expecting to have a sing CUDA toolchain.");
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
+ AddTopLevel(CudaDeviceActions[I], GpuArchList[I]);
+
+ CudaDeviceActions.clear();
+ }
+
+ bool initialize() override {
+ // Nothing to do unless the compilation uses a CUDA toolchain.
+ if (!C.hasOffloadToolChain<Action::OFK_Cuda>())
+ return false;
+
+ const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
+ assert(HostTC && "No toolchain for host compilation.");
+ if (HostTC->getTriple().isNVPTX()) {
+ // We do not support targeting NVPTX for host compilation. Throw
+ // an error and abort pipeline construction early so we don't trip
+ // asserts that assume device-side compilation.
+ C.getDriver().Diag(diag::err_drv_cuda_nvptx_host);
+ return true;
+ }
+
+ ToolChains.push_back(C.getSingleOffloadToolChain<Action::OFK_Cuda>());
+
+ Arg *PartialCompilationArg = Args.getLastArg(
+ options::OPT_cuda_host_only, options::OPT_cuda_device_only,
+ options::OPT_cuda_compile_host_device);
+ CompileHostOnly = PartialCompilationArg &&
+ PartialCompilationArg->getOption().matches(
+ options::OPT_cuda_host_only);
+ CompileDeviceOnly = PartialCompilationArg &&
+ PartialCompilationArg->getOption().matches(
+ options::OPT_cuda_device_only);
+
+ // Collect all --cuda-gpu-arch and --no-cuda-gpu-arch arguments, deduplicated.
+ std::set<CudaArch> GpuArchs;
+ bool Error = false;
+ for (Arg *A : Args) {
+ if (!(A->getOption().matches(options::OPT_cuda_gpu_arch_EQ) ||
+ A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ)))
+ continue;
+ A->claim();
+
+ const StringRef ArchStr = A->getValue();
+ if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ) &&
+ ArchStr == "all") {
+ GpuArchs.clear();
+ continue;
+ }
+ CudaArch Arch = StringToCudaArch(ArchStr);
+ if (Arch == CudaArch::UNKNOWN) {
+ C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
+ Error = true;
+ } else if (A->getOption().matches(options::OPT_cuda_gpu_arch_EQ))
+ GpuArchs.insert(Arch);
+ else if (A->getOption().matches(options::OPT_no_cuda_gpu_arch_EQ))
+ GpuArchs.erase(Arch);
+ else
+ llvm_unreachable("Unexpected option.");
+ }
+
+ // Collect list of GPUs remaining in the set.
+ for (CudaArch Arch : GpuArchs)
+ GpuArchList.push_back(Arch);
+
+ // Default to sm_20 which is the lowest common denominator for
+ // supported GPUs. sm_20 code should work correctly, if
+ // suboptimally, on all newer GPUs.
+ if (GpuArchList.empty())
+ GpuArchList.push_back(CudaArch::SM_20);
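+ // For example, "--cuda-gpu-arch=sm_35 --no-cuda-gpu-arch=all
+ // --cuda-gpu-arch=sm_60" leaves only sm_60 selected, while passing no
+ // arch flags at all falls back to the sm_20 default.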
+
+ return Error;
+ }
+ };
+
+ /// OpenMP action builder. The host bitcode is passed to the device frontend
+ /// and all the device linked images are passed to the host link phase.
+ class OpenMPActionBuilder final : public DeviceActionBuilder {
+ /// The OpenMP actions for the current input.
+ ActionList OpenMPDeviceActions;
+
+ /// The linker inputs obtained for each toolchain.
+ SmallVector<ActionList, 8> DeviceLinkerInputs;
+
+ public:
+ OpenMPActionBuilder(Compilation &C, DerivedArgList &Args,
+ const Driver::InputList &Inputs)
+ : DeviceActionBuilder(C, Args, Inputs, Action::OFK_OpenMP) {}
+
+ ActionBuilderReturnCode
+ getDeviceDependences(OffloadAction::DeviceDependences &DA,
+ phases::ID CurPhase, phases::ID FinalPhase,
+ PhasesTy &Phases) override {
+
+ // We should always have an action for each toolchain.
+ assert(OpenMPDeviceActions.size() == ToolChains.size() &&
+ "Number of OpenMP actions and toolchains do not match.");
+
+ // The host only depends on device action in the linking phase, when all
+ // the device images have to be embedded in the host image.
+ if (CurPhase == phases::Link) {
+ assert(ToolChains.size() == DeviceLinkerInputs.size() &&
+ "Toolchains and linker inputs sizes do not match.");
+ auto LI = DeviceLinkerInputs.begin();
+ for (auto *A : OpenMPDeviceActions) {
+ LI->push_back(A);
+ ++LI;
+ }
+
+ // The device actions are now captured as linker inputs and will be added
+ // to the host link later, so we don't need to do anything else with them
+ // here.
+ OpenMPDeviceActions.clear();
+ return ABRT_Success;
+ }
+
+ // By default, we produce an action for each device toolchain.
+ for (Action *&A : OpenMPDeviceActions)
+ A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A);
+
+ return ABRT_Success;
+ }
+
+ ActionBuilderReturnCode addDeviceDepences(Action *HostAction) override {
+
+ // If this is an input action, replicate it for each OpenMP toolchain.
+ if (auto *IA = dyn_cast<InputAction>(HostAction)) {
+ OpenMPDeviceActions.clear();
+ for (unsigned I = 0; I < ToolChains.size(); ++I)
+ OpenMPDeviceActions.push_back(
+ C.MakeAction<InputAction>(IA->getInputArg(), IA->getType()));
+ return ABRT_Success;
+ }
+
+ // If this is an unbundling action, use it as is for each OpenMP toolchain.
+ if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(HostAction)) {
+ OpenMPDeviceActions.clear();
+ for (unsigned I = 0; I < ToolChains.size(); ++I) {
+ OpenMPDeviceActions.push_back(UA);
+ UA->registerDependentActionInfo(
+ ToolChains[I], /*BoundArch=*/StringRef(), Action::OFK_OpenMP);
+ }
+ return ABRT_Success;
+ }
+
+ // When generating code for OpenMP we use the host compile phase result as
+ // a dependence to the device compile phase so that it can learn what
+ // declarations should be emitted. However, this is not the only use for
+ // the host action, so we prevent it from being collapsed.
+ if (isa<CompileJobAction>(HostAction)) {
+ HostAction->setCannotBeCollapsedWithNextDependentAction();
+ assert(ToolChains.size() == OpenMPDeviceActions.size() &&
+ "Toolchains and device action sizes do not match.");
+ OffloadAction::HostDependence HDep(
+ *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ /*BoundArch=*/nullptr, Action::OFK_OpenMP);
+ auto TC = ToolChains.begin();
+ for (Action *&A : OpenMPDeviceActions) {
+ assert(isa<CompileJobAction>(A));
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*A, **TC, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
+ A = C.MakeAction<OffloadAction>(HDep, DDep);
+ ++TC;
+ }
+ }
+ return ABRT_Success;
+ }
+
+ void appendTopLevelActions(ActionList &AL) override {
+ if (OpenMPDeviceActions.empty())
+ return;
+
+ // We should always have an action for each toolchain.
+ assert(OpenMPDeviceActions.size() == ToolChains.size() &&
+ "Number of OpenMP actions and toolchains do not match.");
+
+ // Append all device actions followed by the proper offload action.
+ auto TI = ToolChains.begin();
+ for (auto *A : OpenMPDeviceActions) {
+ OffloadAction::DeviceDependences Dep;
+ Dep.add(*A, **TI, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
+ AL.push_back(C.MakeAction<OffloadAction>(Dep, A->getType()));
+ ++TI;
+ }
+ // We no longer need the action stored in this builder.
+ OpenMPDeviceActions.clear();
+ }
+
+ void appendLinkDependences(OffloadAction::DeviceDependences &DA) override {
+ assert(ToolChains.size() == DeviceLinkerInputs.size() &&
+ "Toolchains and linker inputs sizes do not match.");
+
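+ // The device images produced here are embedded into the host link through
+ // the offload action created in processHostLinkAction.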
+ // Append a new link action for each device.
+ auto TC = ToolChains.begin();
+ for (auto &LI : DeviceLinkerInputs) {
+ auto *DeviceLinkAction =
+ C.MakeAction<LinkJobAction>(LI, types::TY_Image);
+ DA.add(*DeviceLinkAction, **TC, /*BoundArch=*/nullptr,
+ Action::OFK_OpenMP);
+ ++TC;
+ }
+ }
+
+ bool initialize() override {
+ // Get the OpenMP toolchains. If we don't get any, the action builder will
+ // know there is nothing to do related to OpenMP offloading.
+ auto OpenMPTCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
+ for (auto TI = OpenMPTCRange.first, TE = OpenMPTCRange.second; TI != TE;
+ ++TI)
+ ToolChains.push_back(TI->second);
+
+ DeviceLinkerInputs.resize(ToolChains.size());
+ return false;
+ }
+
+ bool canUseBundlerUnbundler() const override {
+ // OpenMP should use bundled files whenever possible.
+ return true;
+ }
+ };
+
+ ///
+ /// TODO: Add the implementation for other specialized builders here.
+ ///
+
+ /// Specialized builders being used by this offloading action builder.
+ SmallVector<DeviceActionBuilder *, 4> SpecializedBuilders;
+
+ /// Flag set to true if all valid builders allow file bundling/unbundling.
+ bool CanUseBundler;
+
+public:
+ OffloadingActionBuilder(Compilation &C, DerivedArgList &Args,
+ const Driver::InputList &Inputs)
+ : C(C) {
+ // Create a specialized builder for each device toolchain.
+
+ IsValid = true;
+
+ // Create a specialized builder for CUDA.
+ SpecializedBuilders.push_back(new CudaActionBuilder(C, Args, Inputs));
+
+ // Create a specialized builder for OpenMP.
+ SpecializedBuilders.push_back(new OpenMPActionBuilder(C, Args, Inputs));
+
+ //
+ // TODO: Build other specialized builders here.
+ //
+
+ // Initialize all the builders, keeping track of errors. If all valid
+ // builders agree that we can use bundling, set the flag to true.
+ unsigned ValidBuilders = 0u;
+ unsigned ValidBuildersSupportingBundling = 0u;
+ for (auto *SB : SpecializedBuilders) {
+ IsValid = IsValid && !SB->initialize();
+
+ // Update the counters if the builder is valid.
+ if (SB->isValid()) {
+ ++ValidBuilders;
+ if (SB->canUseBundlerUnbundler())
+ ++ValidBuildersSupportingBundling;
+ }
+ }
+ CanUseBundler =
+ ValidBuilders && ValidBuilders == ValidBuildersSupportingBundling;
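+ // Note that a single valid builder that does not support bundling (such
+ // as the CUDA builder) is enough to disable bundling for the whole
+ // compilation.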
+ }
+
+ ~OffloadingActionBuilder() {
+ for (auto *SB : SpecializedBuilders)
+ delete SB;
+ }
+
+ /// Generate an action that adds device dependences (if any) to a host action.
+ /// If no device dependence actions exist, just return the host action \a
+ /// HostAction. If an error is found, or if all builders request the host
+ /// action to be ignored, return nullptr.
+ Action *
+ addDeviceDependencesToHostAction(Action *HostAction, const Arg *InputArg,
+ phases::ID CurPhase, phases::ID FinalPhase,
+ DeviceActionBuilder::PhasesTy &Phases) {
+ if (!IsValid)
+ return nullptr;
+
+ if (SpecializedBuilders.empty())
+ return HostAction;
+
+ assert(HostAction && "Invalid host action!");
+
+ OffloadAction::DeviceDependences DDeps;
+ // Check if all the programming models agree we should not emit the host
+ // action. Also, keep track of the offloading kinds employed.
+ auto &OffloadKind = InputArgToOffloadKindMap[InputArg];
+ unsigned InactiveBuilders = 0u;
+ unsigned IgnoringBuilders = 0u;
+ for (auto *SB : SpecializedBuilders) {
+ if (!SB->isValid()) {
+ ++InactiveBuilders;
+ continue;
+ }
+
+ auto RetCode =
+ SB->getDeviceDependences(DDeps, CurPhase, FinalPhase, Phases);
+
+ // If the builder explicitly says the host action should be ignored,
+ // increment the counter of builders requesting that.
+ if (RetCode == DeviceActionBuilder::ABRT_Ignore_Host)
+ ++IgnoringBuilders;
+
+ // Unless the builder was inactive for this action, we have to record the
+ // offload kind because the host will have to use it.
+ if (RetCode != DeviceActionBuilder::ABRT_Inactive)
+ OffloadKind |= SB->getAssociatedOffloadKind();
+ }
+
+ // If all builders agree that the host action should be ignored, just
+ // return nullptr.
+ if (IgnoringBuilders &&
+ SpecializedBuilders.size() == (InactiveBuilders + IgnoringBuilders))
+ return nullptr;
+
+ if (DDeps.getActions().empty())
+ return HostAction;
+
+ // We have dependences we need to bundle together. We use an offload action
+ // for that.
OffloadAction::HostDependence HDep(
*HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
- /*BoundArch=*/nullptr, Action::OFK_Cuda);
- return C.MakeAction<OffloadAction>(HDep);
+ /*BoundArch=*/nullptr, DDeps);
+ return C.MakeAction<OffloadAction>(HDep, DDeps);
}
- // Collect all cuda_gpu_arch parameters, removing duplicates.
- SmallVector<CudaArch, 4> GpuArchList;
- llvm::SmallSet<CudaArch, 4> GpuArchs;
- for (Arg *A : Args) {
- if (!A->getOption().matches(options::OPT_cuda_gpu_arch_EQ))
- continue;
- A->claim();
+ /// Generate an action that adds a host dependence to a device action. The
+ /// results will be kept in this action builder. Return true if an error was
+ /// found.
+ bool addHostDependenceToDeviceActions(Action *&HostAction,
+ const Arg *InputArg) {
+ if (!IsValid)
+ return true;
- const auto &ArchStr = A->getValue();
- CudaArch Arch = StringToCudaArch(ArchStr);
- if (Arch == CudaArch::UNKNOWN)
- C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
- else if (GpuArchs.insert(Arch).second)
- GpuArchList.push_back(Arch);
- }
-
- // Default to sm_20 which is the lowest common denominator for supported GPUs.
- // sm_20 code should work correctly, if suboptimally, on all newer GPUs.
- if (GpuArchList.empty())
- GpuArchList.push_back(CudaArch::SM_20);
-
- // Replicate inputs for each GPU architecture.
- Driver::InputList CudaDeviceInputs;
- for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I)
- CudaDeviceInputs.push_back(std::make_pair(types::TY_CUDA_DEVICE, InputArg));
-
- // Build actions for all device inputs.
- ActionList CudaDeviceActions;
- C.getDriver().BuildActions(C, Args, CudaDeviceInputs, CudaDeviceActions);
- assert(GpuArchList.size() == CudaDeviceActions.size() &&
- "Failed to create actions for all devices");
-
- // Check whether any of device actions stopped before they could generate PTX.
- bool PartialCompilation =
- llvm::any_of(CudaDeviceActions, [](const Action *a) {
- return a->getKind() != Action::AssembleJobClass;
- });
+ // If we are supporting bundling/unbundling and the current action is an
+ // input action for a non-source file, we replace the host action with the
+ // unbundling action. The bundler tool has the logic to detect whether an
+ // input is a bundle or not; if the input is not a bundle, it is assumed
+ // to be a host file. Therefore it is safe to create an unbundling action
+ // even if the input is not a bundle.
+ if (CanUseBundler && isa<InputAction>(HostAction) &&
+ InputArg->getOption().getKind() == llvm::opt::Option::InputClass &&
+ !types::isSrcFile(HostAction->getType())) {
+ auto UnbundlingHostAction =
+ C.MakeAction<OffloadUnbundlingJobAction>(HostAction);
+ UnbundlingHostAction->registerDependentActionInfo(
+ C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ /*BoundArch=*/StringRef(), Action::OFK_Host);
+ HostAction = UnbundlingHostAction;
+ }
- const ToolChain *CudaTC = C.getSingleOffloadToolChain<Action::OFK_Cuda>();
+ assert(HostAction && "Invalid host action!");
- // Figure out what to do with device actions -- pass them as inputs to the
- // host action or run each of them independently.
- if (PartialCompilation || CompileDeviceOnly) {
- // In case of partial or device-only compilation results of device actions
- // are not consumed by the host action device actions have to be added to
- // top-level actions list with AtTopLevel=true and run independently.
+ // Register the offload kinds that are used.
+ auto &OffloadKind = InputArgToOffloadKindMap[InputArg];
+ for (auto *SB : SpecializedBuilders) {
+ if (!SB->isValid())
+ continue;
- // -o is ambiguous if we have more than one top-level action.
- if (Args.hasArg(options::OPT_o) &&
- (!CompileDeviceOnly || GpuArchList.size() > 1)) {
- C.getDriver().Diag(
- clang::diag::err_drv_output_argument_with_multiple_files);
- return nullptr;
+ auto RetCode = SB->addDeviceDepences(HostAction);
+
+ // Host dependences for device actions are not compatible with that same
+ // action being ignored.
+ assert(RetCode != DeviceActionBuilder::ABRT_Ignore_Host &&
+ "Host dependence not expected to be ignored.!");
+
+ // Unless the builder was inactive for this action, we have to record the
+ // offload kind because the host will have to use it.
+ if (RetCode != DeviceActionBuilder::ABRT_Inactive)
+ OffloadKind |= SB->getAssociatedOffloadKind();
}
- for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
- OffloadAction::DeviceDependences DDep;
- DDep.add(*CudaDeviceActions[I], *CudaTC, CudaArchToString(GpuArchList[I]),
- Action::OFK_Cuda);
- Actions.push_back(
- C.MakeAction<OffloadAction>(DDep, CudaDeviceActions[I]->getType()));
+ return false;
+ }
+
+ /// Add the offloading top level actions to the provided action list. This
+ /// function can replace the host action by a bundling action if the
+ /// programming models allow it.
+ bool appendTopLevelActions(ActionList &AL, Action *HostAction,
+ const Arg *InputArg) {
+ // Get the device actions to be appended.
+ ActionList OffloadAL;
+ for (auto *SB : SpecializedBuilders) {
+ if (!SB->isValid())
+ continue;
+ SB->appendTopLevelActions(OffloadAL);
}
- // Kill host action in case of device-only compilation.
- if (CompileDeviceOnly)
- return nullptr;
- return HostAction;
- }
-
- // If we're not a partial or device-only compilation, we compile each arch to
- // ptx and assemble to cubin, then feed the cubin *and* the ptx into a device
- // "link" action, which uses fatbinary to combine these cubins into one
- // fatbin. The fatbin is then an input to the host compilation.
- ActionList DeviceActions;
- for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
- Action* AssembleAction = CudaDeviceActions[I];
- assert(AssembleAction->getType() == types::TY_Object);
- assert(AssembleAction->getInputs().size() == 1);
-
- Action* BackendAction = AssembleAction->getInputs()[0];
- assert(BackendAction->getType() == types::TY_PP_Asm);
-
- for (auto &A : {AssembleAction, BackendAction}) {
- OffloadAction::DeviceDependences DDep;
- DDep.add(*A, *CudaTC, CudaArchToString(GpuArchList[I]), Action::OFK_Cuda);
- DeviceActions.push_back(C.MakeAction<OffloadAction>(DDep, A->getType()));
- }
- }
- auto FatbinAction =
- C.MakeAction<LinkJobAction>(DeviceActions, types::TY_CUDA_FATBIN);
-
- // Return a new host action that incorporates original host action and all
- // device actions.
- OffloadAction::HostDependence HDep(
- *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
- /*BoundArch=*/nullptr, Action::OFK_Cuda);
- OffloadAction::DeviceDependences DDep;
- DDep.add(*FatbinAction, *CudaTC, /*BoundArch=*/nullptr, Action::OFK_Cuda);
- return C.MakeAction<OffloadAction>(HDep, DDep);
-}
+
+ // If we can use the bundler, replace the host action by the bundling one in
+ // the resulting list. Otherwise, just append the device actions.
+ if (CanUseBundler && !OffloadAL.empty()) {
+ // Add the host action to the list in order to create the bundling action.
+ OffloadAL.push_back(HostAction);
+
+ // We expect that the host action was just appended to the action list
+ // before this method was called.
+ assert(HostAction == AL.back() && "Host action not in the list??");
+ HostAction = C.MakeAction<OffloadBundlingJobAction>(OffloadAL);
+ AL.back() = HostAction;
+ } else
+ AL.append(OffloadAL.begin(), OffloadAL.end());
+
+ // Propagate to the current host action (if any) the offload information
+ // associated with the current input.
+ if (HostAction)
+ HostAction->propagateHostOffloadInfo(InputArgToOffloadKindMap[InputArg],
+ /*BoundArch=*/nullptr);
+ return false;
+ }
+
+ /// Processes the host linker action. This currently consists of replacing it
+ /// with an offload action if there are device link objects, and propagating
+ /// to the host action all the offload kinds used in the current compilation.
+ /// The resulting action is returned.
+ Action *processHostLinkAction(Action *HostAction) {
+ // Add all the dependences from the device linking actions.
+ OffloadAction::DeviceDependences DDeps;
+ for (auto *SB : SpecializedBuilders) {
+ if (!SB->isValid())
+ continue;
+
+ SB->appendLinkDependences(DDeps);
+ }
+
+ // Calculate all the offload kinds used in the current compilation.
+ unsigned ActiveOffloadKinds = 0u;
+ for (auto &I : InputArgToOffloadKindMap)
+ ActiveOffloadKinds |= I.second;
+
+ // If we don't have device dependencies, we don't have to create an offload
+ // action.
+ if (DDeps.getActions().empty()) {
+ // Propagate all the active kinds to the host action. Given that it is a link
+ // action it is assumed to depend on all actions generated so far.
+ HostAction->propagateHostOffloadInfo(ActiveOffloadKinds,
+ /*BoundArch=*/nullptr);
+ return HostAction;
+ }
+
+ // Create the offload action with all dependences. When an offload action
+ // is created the kinds are propagated to the host action, so we don't have
+ // to do that explicitly here.
+ OffloadAction::HostDependence HDep(
+ *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ /*BoundArch*/ nullptr, ActiveOffloadKinds);
+ return C.MakeAction<OffloadAction>(HDep, DDeps);
+ }
+};
+} // anonymous namespace.
void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
const InputList &Inputs, ActionList &Actions) const {
@@ -1621,8 +2445,8 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
YcArg = YuArg = nullptr;
}
- // Track the host offload kinds used on this compilation.
- unsigned CompilationActiveOffloadHostKinds = 0u;
+ // Builder to be used to build offloading actions.
+ OffloadingActionBuilder OffloadBuilder(C, Args, Inputs);
// Construct the actions to perform.
ActionList LinkerInputs;
@@ -1670,12 +2494,14 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (YcArg) {
// Add a separate precompile phase for the compile phase.
if (FinalPhase >= phases::Compile) {
+ const types::ID HeaderType = lookupHeaderTypeForSourceType(InputType);
llvm::SmallVector<phases::ID, phases::MaxNumberOfPhases> PCHPL;
- types::getCompilationPhases(types::TY_CXXHeader, PCHPL);
+ types::getCompilationPhases(HeaderType, PCHPL);
Arg *PchInputArg = MakeInputArg(Args, Opts, YcArg->getValue());
// Build the pipeline for the pch file.
- Action *ClangClPch = C.MakeAction<InputAction>(*PchInputArg, InputType);
+ Action *ClangClPch =
+ C.MakeAction<InputAction>(*PchInputArg, HeaderType);
for (phases::ID Phase : PCHPL)
ClangClPch = ConstructPhaseAction(C, Args, Phase, ClangClPch);
assert(ClangClPch);
@@ -1686,17 +2512,14 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
}
- phases::ID CudaInjectionPhase =
- (phases::Compile < FinalPhase &&
- llvm::find(PL, phases::Compile) != PL.end())
- ? phases::Compile
- : FinalPhase;
-
- // Track the host offload kinds used on this input.
- unsigned InputActiveOffloadHostKinds = 0u;
-
// Build the pipeline for this file.
Action *Current = C.MakeAction<InputAction>(*InputArg, InputType);
+
+ // Use the current host action in any of the offloading actions, if
+ // required.
+ if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
+ break;
+
for (SmallVectorImpl<phases::ID>::iterator i = PL.begin(), e = PL.end();
i != e; ++i) {
phases::ID Phase = *i;
@@ -1705,6 +2528,12 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (Phase > FinalPhase)
break;
+ // Add any offload action the host action depends on.
+ Current = OffloadBuilder.addDeviceDependencesToHostAction(
+ Current, InputArg, Phase, FinalPhase, PL);
+ if (!Current)
+ break;
+
// Queue linker inputs.
if (Phase == phases::Link) {
assert((i + 1) == e && "linking must be final compilation step.");
@@ -1713,48 +2542,37 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
break;
}
- // Some types skip the assembler phase (e.g., llvm-bc), but we can't
- // encode this in the steps because the intermediate type depends on
- // arguments. Just special case here.
- if (Phase == phases::Assemble && Current->getType() != types::TY_PP_Asm)
- continue;
-
// Otherwise construct the appropriate action.
- Current = ConstructPhaseAction(C, Args, Phase, Current);
+ auto *NewCurrent = ConstructPhaseAction(C, Args, Phase, Current);
- if (InputType == types::TY_CUDA && Phase == CudaInjectionPhase) {
- Current = buildCudaActions(C, Args, InputArg, Current, Actions);
- if (!Current)
- break;
+ // We didn't create a new action, so we will just move to the next phase.
+ if (NewCurrent == Current)
+ continue;
- // We produced a CUDA action for this input, so the host has to support
- // CUDA.
- InputActiveOffloadHostKinds |= Action::OFK_Cuda;
- CompilationActiveOffloadHostKinds |= Action::OFK_Cuda;
- }
+ Current = NewCurrent;
+
+ // Use the current host action in any of the offloading actions, if
+ // required.
+ if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
+ break;
if (Current->getType() == types::TY_Nothing)
break;
}
- // If we ended with something, add to the output list. Also, propagate the
- // offload information to the top-level host action related with the current
- // input.
- if (Current) {
- if (InputActiveOffloadHostKinds)
- Current->propagateHostOffloadInfo(InputActiveOffloadHostKinds,
- /*BoundArch=*/nullptr);
+ // If we ended with something, add to the output list.
+ if (Current)
Actions.push_back(Current);
- }
+
+ // Add any top level actions generated for offloading.
+ OffloadBuilder.appendTopLevelActions(Actions, Current, InputArg);
}
- // Add a link action if necessary and propagate the offload information for
- // the current compilation.
+ // Add a link action if necessary.
if (!LinkerInputs.empty()) {
- Actions.push_back(
- C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image));
- Actions.back()->propagateHostOffloadInfo(CompilationActiveOffloadHostKinds,
- /*BoundArch=*/nullptr);
+ Action *LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
+ LA = OffloadBuilder.processHostLinkAction(LA);
+ Actions.push_back(LA);
}
// If we are linking, claim any options which are obviously only used for
@@ -1776,6 +2594,13 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Action *Driver::ConstructPhaseAction(Compilation &C, const ArgList &Args,
phases::ID Phase, Action *Input) const {
llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
+
+ // Some types skip the assembler phase (e.g., llvm-bc), but we can't
+ // encode this in the steps because the intermediate type depends on
+ // arguments. Just special-case it here.
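+ // (For instance, with -emit-llvm the backend produces bitcode rather than
+ // textual assembly, so the assemble phase must become a no-op.)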
+ if (Phase == phases::Assemble && Input->getType() != types::TY_PP_Asm)
+ return Input;
+
// Build the appropriate action.
switch (Phase) {
case phases::Link:
@@ -1797,7 +2622,9 @@ Action *Driver::ConstructPhaseAction(Compilation &C, const ArgList &Args,
return C.MakeAction<PreprocessJobAction>(Input, OutputTy);
}
case phases::Precompile: {
- types::ID OutputTy = types::TY_PCH;
+ types::ID OutputTy = getPrecompiledType(Input->getType());
+ assert(OutputTy != types::TY_INVALID &&
+ "Cannot precompile this input type!");
if (Args.hasArg(options::OPT_fsyntax_only)) {
// Syntax checks should not emit a PCH file
OutputTy = types::TY_Nothing;
@@ -1888,11 +2715,11 @@ void Driver::BuildJobs(Compilation &C) const {
}
BuildJobsForAction(C, A, &C.getDefaultToolChain(),
- /*BoundArch*/ nullptr,
+ /*BoundArch*/ StringRef(),
/*AtTopLevel*/ true,
/*MultipleArchs*/ ArchNames.size() > 1,
/*LinkingOutput*/ LinkingOutput, CachedResults,
- /*BuildForOffloadDevice*/ false);
+ /*TargetDeviceOffloadKind*/ Action::OFK_None);
}
// If the user passed -Qunused-arguments or there were errors, don't warn
@@ -1941,177 +2768,335 @@ void Driver::BuildJobs(Compilation &C) const {
}
}
}
-/// Collapse an offloading action looking for a job of the given type. The input
-/// action is changed to the input of the collapsed sequence. If we effectively
-/// had a collapse return the corresponding offloading action, otherwise return
-/// null.
-template <typename T>
-static OffloadAction *collapseOffloadingAction(Action *&CurAction) {
- if (!CurAction)
- return nullptr;
- if (auto *OA = dyn_cast<OffloadAction>(CurAction)) {
- if (OA->hasHostDependence())
- if (auto *HDep = dyn_cast<T>(OA->getHostDependence())) {
- CurAction = HDep;
- return OA;
- }
- if (OA->hasSingleDeviceDependence())
- if (auto *DDep = dyn_cast<T>(OA->getSingleDeviceDependence())) {
- CurAction = DDep;
- return OA;
+
+namespace {
+/// Utility class to control the collapse of dependent actions and select the
+/// tools accordingly.
+class ToolSelector final {
+ /// The tool chain this selector refers to.
+ const ToolChain &TC;
+
+ /// The compilation this selector refers to.
+ const Compilation &C;
+
+ /// The base action this selector refers to.
+ const JobAction *BaseAction;
+
+ /// Set to true if the current toolchain refers to host actions.
+ bool IsHostSelector;
+
+ /// Flags set when the save-temps and embed-bitcode features are enabled.
+ bool SaveTemps;
+ bool EmbedBitcode;
+
+ /// Get the previous dependent action, or null if none exists. If
+ /// \a CanBeCollapsed is true, the action must be legal to collapse, or
+ /// null will be returned.
+ const JobAction *getPrevDependentAction(const ActionList &Inputs,
+ ActionList &SavedOffloadAction,
+ bool CanBeCollapsed = true) {
+ // An option can be collapsed only if it has a single input.
+ if (Inputs.size() != 1)
+ return nullptr;
+
+ Action *CurAction = *Inputs.begin();
+ if (CanBeCollapsed &&
+ !CurAction->isCollapsingWithNextDependentActionLegal())
+ return nullptr;
+
+ // If the input action is an offload action, look through it and save any
+ // offload action that can be dropped in the event of a collapse.
+ if (auto *OA = dyn_cast<OffloadAction>(CurAction)) {
+ // If the dependent action is a device action, we will attempt to collapse
+ // only with other device actions. Otherwise, we would do the same but
+ // with host actions only.
+ if (!IsHostSelector) {
+ if (OA->hasSingleDeviceDependence(/*DoNotConsiderHostActions=*/true)) {
+ CurAction =
+ OA->getSingleDeviceDependence(/*DoNotConsiderHostActions=*/true);
+ if (CanBeCollapsed &&
+ !CurAction->isCollapsingWithNextDependentActionLegal())
+ return nullptr;
+ SavedOffloadAction.push_back(OA);
+ return dyn_cast<JobAction>(CurAction);
+ }
+ } else if (OA->hasHostDependence()) {
+ CurAction = OA->getHostDependence();
+ if (CanBeCollapsed &&
+ !CurAction->isCollapsingWithNextDependentActionLegal())
+ return nullptr;
+ SavedOffloadAction.push_back(OA);
+ return dyn_cast<JobAction>(CurAction);
}
- }
- return nullptr;
-}
-// Returns a Tool for a given JobAction. In case the action and its
-// predecessors can be combined, updates Inputs with the inputs of the
-// first combined action. If one of the collapsed actions is a
-// CudaHostAction, updates CollapsedCHA with the pointer to it so the
-// caller can deal with extra handling such action requires.
-static const Tool *selectToolForJob(Compilation &C, bool SaveTemps,
- bool EmbedBitcode, const ToolChain *TC,
- const JobAction *JA,
- const ActionList *&Inputs,
- ActionList &CollapsedOffloadAction) {
- const Tool *ToolForJob = nullptr;
- CollapsedOffloadAction.clear();
-
- // See if we should look for a compiler with an integrated assembler. We match
- // bottom up, so what we are actually looking for is an assembler job with a
- // compiler input.
-
- // Look through offload actions between assembler and backend actions.
- Action *BackendJA = (isa<AssembleJobAction>(JA) && Inputs->size() == 1)
- ? *Inputs->begin()
- : nullptr;
- auto *BackendOA = collapseOffloadingAction<BackendJobAction>(BackendJA);
-
- if (TC->useIntegratedAs() && !SaveTemps &&
- !C.getArgs().hasArg(options::OPT_via_file_asm) &&
- !C.getArgs().hasArg(options::OPT__SLASH_FA) &&
- !C.getArgs().hasArg(options::OPT__SLASH_Fa) && BackendJA &&
- isa<BackendJobAction>(BackendJA)) {
- // A BackendJob is always preceded by a CompileJob, and without -save-temps
- // or -fembed-bitcode, they will always get combined together, so instead of
- // checking the backend tool, check if the tool for the CompileJob has an
- // integrated assembler. For -fembed-bitcode, CompileJob is still used to
- // look up tools for BackendJob, but they need to match before we can split
- // them.
-
- // Look through offload actions between backend and compile actions.
- Action *CompileJA = *BackendJA->getInputs().begin();
- auto *CompileOA = collapseOffloadingAction<CompileJobAction>(CompileJA);
-
- assert(CompileJA && isa<CompileJobAction>(CompileJA) &&
- "Backend job is not preceeded by compile job.");
- const Tool *Compiler = TC->SelectTool(*cast<CompileJobAction>(CompileJA));
- if (!Compiler)
return nullptr;
+ }
+
+ return dyn_cast<JobAction>(CurAction);
+ }
+
+ /// Return true if an assemble action can be collapsed.
+ bool canCollapseAssembleAction() const {
+ return TC.useIntegratedAs() && !SaveTemps &&
+ !C.getArgs().hasArg(options::OPT_via_file_asm) &&
+ !C.getArgs().hasArg(options::OPT__SLASH_FA) &&
+ !C.getArgs().hasArg(options::OPT__SLASH_Fa);
+ }
+
+ /// Return true if a preprocessor action can be collapsed.
+ bool canCollapsePreprocessorAction() const {
+ return !C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
+ !C.getArgs().hasArg(options::OPT_traditional_cpp) && !SaveTemps &&
+ !C.getArgs().hasArg(options::OPT_rewrite_objc);
+ }
+
+ /// Struct that relates an action with the offload actions that would be
+ /// collapsed with it.
+ struct JobActionInfo final {
+ /// The action this info refers to.
+ const JobAction *JA = nullptr;
+ /// The offload actions we need to take care of if this action is
+ /// collapsed.
+ ActionList SavedOffloadAction;
+ };
+
+ /// Append collapsed offload actions from the given number of elements in
+ /// the action info array.
+ static void AppendCollapsedOffloadAction(ActionList &CollapsedOffloadAction,
+ ArrayRef<JobActionInfo> &ActionInfo,
+ unsigned ElementNum) {
+ assert(ElementNum <= ActionInfo.size() && "Invalid number of elements.");
+ for (unsigned I = 0; I < ElementNum; ++I)
+ CollapsedOffloadAction.append(ActionInfo[I].SavedOffloadAction.begin(),
+ ActionInfo[I].SavedOffloadAction.end());
+ }
+
+ /// Functions that attempt to perform the combining. They detect if that is
+ /// legal, and if so they update the inputs \a Inputs and the offload actions
+ /// that were collapsed in \a CollapsedOffloadAction. A tool that deals with
+ /// the combined action is returned. If the combining is not legal or if the
+ /// tool does not exist, null is returned.
+ /// Currently three kinds of collapsing are supported:
+ /// - Assemble + Backend + Compile;
+ /// - Assemble + Backend;
+ /// - Backend + Compile.
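+ /// For example, with the integrated assembler a compile -> backend ->
+ /// assemble chain collapses into a single job run by the compiler tool.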
+ const Tool *
+ combineAssembleBackendCompile(ArrayRef<JobActionInfo> ActionInfo,
+ const ActionList *&Inputs,
+ ActionList &CollapsedOffloadAction) {
+ if (ActionInfo.size() < 3 || !canCollapseAssembleAction())
+ return nullptr;
+ auto *AJ = dyn_cast<AssembleJobAction>(ActionInfo[0].JA);
+ auto *BJ = dyn_cast<BackendJobAction>(ActionInfo[1].JA);
+ auto *CJ = dyn_cast<CompileJobAction>(ActionInfo[2].JA);
+ if (!AJ || !BJ || !CJ)
+ return nullptr;
+
+ // Get compiler tool.
+ const Tool *T = TC.SelectTool(*CJ);
+ if (!T)
+ return nullptr;
+
// When using -fembed-bitcode, it is required to have the same tool (clang)
// for both CompilerJA and BackendJA. Otherwise, combine two stages.
if (EmbedBitcode) {
- JobAction *InputJA = cast<JobAction>(*Inputs->begin());
- const Tool *BackendTool = TC->SelectTool(*InputJA);
- if (BackendTool == Compiler)
- CompileJA = InputJA;
- }
- if (Compiler->hasIntegratedAssembler()) {
- Inputs = &CompileJA->getInputs();
- ToolForJob = Compiler;
- // Save the collapsed offload actions because they may still contain
- // device actions.
- if (CompileOA)
- CollapsedOffloadAction.push_back(CompileOA);
- if (BackendOA)
- CollapsedOffloadAction.push_back(BackendOA);
- }
- }
-
- // A backend job should always be combined with the preceding compile job
- // unless OPT_save_temps or OPT_fembed_bitcode is enabled and the compiler is
- // capable of emitting LLVM IR as an intermediate output.
- if (isa<BackendJobAction>(JA)) {
- // Check if the compiler supports emitting LLVM IR.
- assert(Inputs->size() == 1);
-
- // Look through offload actions between backend and compile actions.
- Action *CompileJA = *JA->getInputs().begin();
- auto *CompileOA = collapseOffloadingAction<CompileJobAction>(CompileJA);
-
- assert(CompileJA && isa<CompileJobAction>(CompileJA) &&
- "Backend job is not preceeded by compile job.");
- const Tool *Compiler = TC->SelectTool(*cast<CompileJobAction>(CompileJA));
- if (!Compiler)
+ const Tool *BT = TC.SelectTool(*BJ);
+ if (BT == T)
+ return nullptr;
+ }
+
+ if (!T->hasIntegratedAssembler())
return nullptr;
- if (!Compiler->canEmitIR() ||
- (!SaveTemps && !EmbedBitcode)) {
- Inputs = &CompileJA->getInputs();
- ToolForJob = Compiler;
- if (CompileOA)
- CollapsedOffloadAction.push_back(CompileOA);
- }
+ Inputs = &CJ->getInputs();
+ AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
+ /*NumElements=*/3);
+ return T;
}
+ const Tool *combineAssembleBackend(ArrayRef<JobActionInfo> ActionInfo,
+ const ActionList *&Inputs,
+ ActionList &CollapsedOffloadAction) {
+ if (ActionInfo.size() < 2 || !canCollapseAssembleAction())
+ return nullptr;
+ auto *AJ = dyn_cast<AssembleJobAction>(ActionInfo[0].JA);
+ auto *BJ = dyn_cast<BackendJobAction>(ActionInfo[1].JA);
+ if (!AJ || !BJ)
+ return nullptr;
- // Otherwise use the tool for the current job.
- if (!ToolForJob)
- ToolForJob = TC->SelectTool(*JA);
+ // Retrieve the compile job; a backend action must always be preceded by one.
+ ActionList CompileJobOffloadActions;
+ auto *CJ = getPrevDependentAction(BJ->getInputs(), CompileJobOffloadActions,
+ /*CanBeCollapsed=*/false);
+ if (!CJ)
+ return nullptr;
- // See if we should use an integrated preprocessor. We do so when we have
- // exactly one input, since this is the only use case we care about
- // (irrelevant since we don't support combine yet).
+ assert(isa<CompileJobAction>(CJ) &&
+ "Expecting compile job preceding backend job.");
- // Look through offload actions after preprocessing.
- Action *PreprocessJA = (Inputs->size() == 1) ? *Inputs->begin() : nullptr;
- auto *PreprocessOA =
- collapseOffloadingAction<PreprocessJobAction>(PreprocessJA);
+ // Get compiler tool.
+ const Tool *T = TC.SelectTool(*CJ);
+ if (!T)
+ return nullptr;
+
+ if (!T->hasIntegratedAssembler())
+ return nullptr;
- if (PreprocessJA && isa<PreprocessJobAction>(PreprocessJA) &&
- !C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
- !C.getArgs().hasArg(options::OPT_traditional_cpp) && !SaveTemps &&
- !C.getArgs().hasArg(options::OPT_rewrite_objc) &&
- ToolForJob->hasIntegratedCPP()) {
- Inputs = &PreprocessJA->getInputs();
- if (PreprocessOA)
- CollapsedOffloadAction.push_back(PreprocessOA);
+ Inputs = &BJ->getInputs();
+ AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
+ /*NumElements=*/2);
+ return T;
}
+ const Tool *combineBackendCompile(ArrayRef<JobActionInfo> ActionInfo,
+ const ActionList *&Inputs,
+ ActionList &CollapsedOffloadAction) {
+ if (ActionInfo.size() < 2 || !canCollapsePreprocessorAction())
+ return nullptr;
+ auto *BJ = dyn_cast<BackendJobAction>(ActionInfo[0].JA);
+ auto *CJ = dyn_cast<CompileJobAction>(ActionInfo[1].JA);
+ if (!BJ || !CJ)
+ return nullptr;
+
+ // Get compiler tool.
+ const Tool *T = TC.SelectTool(*CJ);
+ if (!T)
+ return nullptr;
+
+ if (T->canEmitIR() && (SaveTemps || EmbedBitcode))
+ return nullptr;
+
+ Inputs = &CJ->getInputs();
+ AppendCollapsedOffloadAction(CollapsedOffloadAction, ActionInfo,
+ /*NumElements=*/2);
+ return T;
+ }
+
+ /// Updates the inputs if the obtained tool supports combining with a
+ /// preprocessor action, and the current input is indeed a preprocessor
+ /// action. If combining results in the collapse of offloading actions, those
+ /// are appended to \a CollapsedOffloadAction.
+ void combineWithPreprocessor(const Tool *T, const ActionList *&Inputs,
+ ActionList &CollapsedOffloadAction) {
+ if (!T || !canCollapsePreprocessorAction() || !T->hasIntegratedCPP())
+ return;
+
+ // Attempt to get a preprocessor action dependence.
+ ActionList PreprocessJobOffloadActions;
+ auto *PJ = getPrevDependentAction(*Inputs, PreprocessJobOffloadActions);
+ if (!PJ || !isa<PreprocessJobAction>(PJ))
+ return;
+
+ // This is legal to combine. Append any offload action we found and set the
+ // current inputs to preprocessor inputs.
+ CollapsedOffloadAction.append(PreprocessJobOffloadActions.begin(),
+ PreprocessJobOffloadActions.end());
+ Inputs = &PJ->getInputs();
+ }
+
+public:
+ ToolSelector(const JobAction *BaseAction, const ToolChain &TC,
+ const Compilation &C, bool SaveTemps, bool EmbedBitcode)
+ : TC(TC), C(C), BaseAction(BaseAction), SaveTemps(SaveTemps),
+ EmbedBitcode(EmbedBitcode) {
+ assert(BaseAction && "Invalid base action.");
+ IsHostSelector = BaseAction->getOffloadingDeviceKind() == Action::OFK_None;
+ }
+
+ /// Check if a chain of actions can be combined and return the tool that can
+ /// handle the combination of actions. The pointer to the current inputs \a
+ /// Inputs and the list of offload actions \a CollapsedOffloadActions
+ /// connected to collapsed actions are updated accordingly. The latter enables
+ /// the caller of the selector to process them afterwards instead of just
+ /// dropping them. If no suitable tool is found, null will be returned.
+ const Tool *getTool(const ActionList *&Inputs,
+ ActionList &CollapsedOffloadAction) {
+ //
+ // Get the largest chain of actions that we could combine.
+ //
+
+ SmallVector<JobActionInfo, 5> ActionChain(1);
+ ActionChain.back().JA = BaseAction;
+ while (ActionChain.back().JA) {
+ const Action *CurAction = ActionChain.back().JA;
+
+ // Grow the chain by one element.
+ ActionChain.resize(ActionChain.size() + 1);
+ JobActionInfo &AI = ActionChain.back();
+
+ // Attempt to fill it with the previous dependent action.
+ AI.JA =
+ getPrevDependentAction(CurAction->getInputs(), AI.SavedOffloadAction);
+ }
+
+ // Pop the last action info as it could not be filled.
+ ActionChain.pop_back();
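+ // For a simple compile of one C file with base action "assemble", the
+ // chain ends up as {assemble, backend, compile, preprocess}; the walk
+ // stops at the input action, which is not a JobAction.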
- return ToolForJob;
+ //
+ // Attempt to combine actions. If all combining attempts fail, just return
+ // the tool of the provided action. At the end we attempt to combine the
+ // action with any preprocessor action it may depend on.
+ //
+
+ const Tool *T = combineAssembleBackendCompile(ActionChain, Inputs,
+ CollapsedOffloadAction);
+ if (!T)
+ T = combineAssembleBackend(ActionChain, Inputs, CollapsedOffloadAction);
+ if (!T)
+ T = combineBackendCompile(ActionChain, Inputs, CollapsedOffloadAction);
+ if (!T) {
+ Inputs = &BaseAction->getInputs();
+ T = TC.SelectTool(*BaseAction);
+ }
+
+ combineWithPreprocessor(T, Inputs, CollapsedOffloadAction);
+ return T;
+ }
+};
}
-InputInfo Driver::BuildJobsForAction(
- Compilation &C, const Action *A, const ToolChain *TC, const char *BoundArch,
- bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
- std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
- bool BuildForOffloadDevice) const {
- // The bound arch is not necessarily represented in the toolchain's triple --
- // for example, armv7 and armv7s both map to the same triple -- so we need
- // both in our map.
+/// Return a string that uniquely identifies the result of a job. The bound arch
+/// is not necessarily represented in the toolchain's triple -- for example,
+/// armv7 and armv7s both map to the same triple -- so we need both in our map.
+/// Also, we need to add the offloading device kind, as the same tool chain can
+/// be used for host and device for some programming models, e.g. OpenMP.
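+/// For instance, a CUDA device job bound to sm_35 might be keyed as something
+/// like "nvptx64-nvidia-cuda-sm_35-cuda" (illustrative value).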
+static std::string GetTriplePlusArchString(const ToolChain *TC,
+ StringRef BoundArch,
+ Action::OffloadKind OffloadKind) {
std::string TriplePlusArch = TC->getTriple().normalize();
- if (BoundArch) {
+ if (!BoundArch.empty()) {
TriplePlusArch += "-";
TriplePlusArch += BoundArch;
}
- std::pair<const Action *, std::string> ActionTC = {A, TriplePlusArch};
+ TriplePlusArch += "-";
+ TriplePlusArch += Action::GetOffloadKindName(OffloadKind);
+ return TriplePlusArch;
+}
+
+InputInfo Driver::BuildJobsForAction(
+ Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
+ bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
+ std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
+ Action::OffloadKind TargetDeviceOffloadKind) const {
+ std::pair<const Action *, std::string> ActionTC = {
+ A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
auto CachedResult = CachedResults.find(ActionTC);
if (CachedResult != CachedResults.end()) {
return CachedResult->second;
}
InputInfo Result = BuildJobsForActionNoCache(
C, A, TC, BoundArch, AtTopLevel, MultipleArchs, LinkingOutput,
- CachedResults, BuildForOffloadDevice);
+ CachedResults, TargetDeviceOffloadKind);
CachedResults[ActionTC] = Result;
return Result;
}
InputInfo Driver::BuildJobsForActionNoCache(
- Compilation &C, const Action *A, const ToolChain *TC, const char *BoundArch,
+ Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
- bool BuildForOffloadDevice) const {
+ Action::OffloadKind TargetDeviceOffloadKind) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
InputInfoList OffloadDependencesInputInfo;
+ bool BuildingForOffloadDevice = TargetDeviceOffloadKind != Action::OFK_None;
if (const OffloadAction *OA = dyn_cast<OffloadAction>(A)) {
// The offload action is expected to be used in four different situations.
//
@@ -2121,7 +3106,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
// b) Set a toolchain/architecture/kind for a device action;
// Device Action 1 -> OffloadAction -> Device Action 2
//
- // c) Specify a device dependences to a host action;
+ // c) Specify a device dependence to a host action;
// Device Action 1 _
// \
// Host Action 1 ---> OffloadAction -> Host Action 2
@@ -2144,7 +3129,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
DevA =
BuildJobsForAction(C, DepA, DepTC, DepBoundArch, AtTopLevel,
/*MultipleArchs*/ !!DepBoundArch, LinkingOutput,
- CachedResults, /*BuildForOffloadDevice=*/true);
+ CachedResults, DepA->getOffloadingDeviceKind());
});
return DevA;
}
@@ -2154,16 +3139,15 @@ InputInfo Driver::BuildJobsForActionNoCache(
// generate the host dependences and override the action with the device
// dependence. The dependences can't therefore be a top-level action.
OA->doOnEachDependence(
- /*IsHostDependence=*/BuildForOffloadDevice,
+ /*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
OffloadDependencesInputInfo.push_back(BuildJobsForAction(
C, DepA, DepTC, DepBoundArch, /*AtTopLevel=*/false,
/*MultipleArchs*/ !!DepBoundArch, LinkingOutput, CachedResults,
- /*BuildForOffloadDevice=*/DepA->getOffloadingDeviceKind() !=
- Action::OFK_None));
+ DepA->getOffloadingDeviceKind()));
});
- A = BuildForOffloadDevice
+ A = BuildingForOffloadDevice
? OA->getSingleDeviceDependence(/*DoNotConsiderHostActions=*/true)
: OA->getHostDependence();
}
@@ -2182,9 +3166,9 @@ InputInfo Driver::BuildJobsForActionNoCache(
if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
const ToolChain *TC;
- const char *ArchName = BAA->getArchName();
+ StringRef ArchName = BAA->getArchName();
- if (ArchName)
+ if (!ArchName.empty())
TC = &getToolChain(C.getArgs(),
computeTargetTriple(*this, DefaultTargetTriple,
C.getArgs(), ArchName));
@@ -2193,7 +3177,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
return BuildJobsForAction(C, *BAA->input_begin(), TC, ArchName, AtTopLevel,
MultipleArchs, LinkingOutput, CachedResults,
- BuildForOffloadDevice);
+ TargetDeviceOffloadKind);
}
@@ -2202,9 +3186,9 @@ InputInfo Driver::BuildJobsForActionNoCache(
const JobAction *JA = cast<JobAction>(A);
ActionList CollapsedOffloadActions;
- const Tool *T =
- selectToolForJob(C, isSaveTempsEnabled(), embedBitcodeEnabled(), TC, JA,
- Inputs, CollapsedOffloadActions);
+ ToolSelector TS(JA, *TC, C, isSaveTempsEnabled(), embedBitcodeInObject());
+ const Tool *T = TS.getTool(Inputs, CollapsedOffloadActions);
+
if (!T)
return InputInfo();
@@ -2212,13 +3196,12 @@ InputInfo Driver::BuildJobsForActionNoCache(
// need to build jobs for host/device-side inputs it may have held.
for (const auto *OA : CollapsedOffloadActions)
cast<OffloadAction>(OA)->doOnEachDependence(
- /*IsHostDependence=*/BuildForOffloadDevice,
+ /*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
OffloadDependencesInputInfo.push_back(BuildJobsForAction(
- C, DepA, DepTC, DepBoundArch, AtTopLevel,
+ C, DepA, DepTC, DepBoundArch, /*AtTopLevel=*/false,
/*MultipleArchs=*/!!DepBoundArch, LinkingOutput, CachedResults,
- /*BuildForOffloadDevice=*/DepA->getOffloadingDeviceKind() !=
- Action::OFK_None));
+ DepA->getOffloadingDeviceKind()));
});
// Only use pipes when there is exactly one input.
@@ -2231,7 +3214,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
AtTopLevel && (isa<DsymutilJobAction>(A) || isa<VerifyJobAction>(A));
InputInfos.push_back(BuildJobsForAction(
C, Input, TC, BoundArch, SubJobAtTopLevel, MultipleArchs, LinkingOutput,
- CachedResults, BuildForOffloadDevice));
+ CachedResults, A->getOffloadingDeviceKind()));
}
// Always use the first input as the base input.
@@ -2247,15 +3230,75 @@ InputInfo Driver::BuildJobsForActionNoCache(
InputInfos.append(OffloadDependencesInputInfo.begin(),
OffloadDependencesInputInfo.end());
+ // Set the effective triple of the toolchain for the duration of this job.
+ llvm::Triple EffectiveTriple;
+ const ToolChain &ToolTC = T->getToolChain();
+ const ArgList &Args =
+ C.getArgsForToolChain(TC, BoundArch, A->getOffloadingDeviceKind());
+ if (InputInfos.size() != 1) {
+ EffectiveTriple = llvm::Triple(ToolTC.ComputeEffectiveClangTriple(Args));
+ } else {
+ // Pass along the input type if it can be unambiguously determined.
+ EffectiveTriple = llvm::Triple(
+ ToolTC.ComputeEffectiveClangTriple(Args, InputInfos[0].getType()));
+ }
+ RegisterEffectiveTriple TripleRAII(ToolTC, EffectiveTriple);
+
// Determine the place to write output to, if any.
InputInfo Result;
- if (JA->getType() == types::TY_Nothing)
+ InputInfoList UnbundlingResults;
+ if (auto *UA = dyn_cast<OffloadUnbundlingJobAction>(JA)) {
+ // If we have an unbundling job, we need to create results for all the
+ // outputs. We also update the results cache so that other actions using
+ // this unbundling action can get the right results.
+ for (auto &UI : UA->getDependentActionsInfo()) {
+ assert(UI.DependentOffloadKind != Action::OFK_None &&
+ "Unbundling with no offloading??");
+
+ // Unbundling actions are never at the top level. When we generate the
+ // offloading prefix, we also do that for the host file because the
+ // unbundling action does not change the type of the output, which could
+ // otherwise cause an overwrite.
+ std::string OffloadingPrefix = Action::GetOffloadingFileNamePrefix(
+ UI.DependentOffloadKind,
+ UI.DependentToolChain->getTriple().normalize(),
+ /*CreatePrefixForHost=*/true);
+ auto CurI = InputInfo(
+ UA, GetNamedOutputPath(C, *UA, BaseInput, UI.DependentBoundArch,
+ /*AtTopLevel=*/false, MultipleArchs,
+ OffloadingPrefix),
+ BaseInput);
+ // Save the unbundling result.
+ UnbundlingResults.push_back(CurI);
+
+ // Get the unique string identifier for this dependence and cache the
+ // result.
+ CachedResults[{A, GetTriplePlusArchString(
+ UI.DependentToolChain, UI.DependentBoundArch,
+ UI.DependentOffloadKind)}] = CurI;
+ }
+
+ // Now that we have all the results generated, select the one that should
+ // be returned to the current dependent action.
+ std::pair<const Action *, std::string> ActionTC = {
+ A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
+ assert(CachedResults.find(ActionTC) != CachedResults.end() &&
+ "Result does not exist??");
+ Result = CachedResults[ActionTC];
+ } else if (JA->getType() == types::TY_Nothing)
Result = InputInfo(A, BaseInput);
- else
+ else {
+ // We only have to generate a prefix for the host if it has active
+ // offloading kinds and this is not a top-level action.
+ std::string OffloadingPrefix = Action::GetOffloadingFileNamePrefix(
+ A->getOffloadingDeviceKind(), TC->getTriple().normalize(),
+ /*CreatePrefixForHost=*/!!A->getOffloadingHostActiveKinds() &&
+ !AtTopLevel);
Result = InputInfo(A, GetNamedOutputPath(C, *JA, BaseInput, BoundArch,
AtTopLevel, MultipleArchs,
- TC->getTriple().normalize()),
+ OffloadingPrefix),
BaseInput);
+ }
if (CCCPrintBindings && !CCGenDiagnostics) {
llvm::errs() << "# \"" << T->getToolChain().getTripleString() << '"'
@@ -2265,10 +3308,28 @@ InputInfo Driver::BuildJobsForActionNoCache(
if (i + 1 != e)
llvm::errs() << ", ";
}
- llvm::errs() << "], output: " << Result.getAsString() << "\n";
+ if (UnbundlingResults.empty())
+ llvm::errs() << "], output: " << Result.getAsString() << "\n";
+ else {
+ llvm::errs() << "], outputs: [";
+ for (unsigned i = 0, e = UnbundlingResults.size(); i != e; ++i) {
+ llvm::errs() << UnbundlingResults[i].getAsString();
+ if (i + 1 != e)
+ llvm::errs() << ", ";
+ }
+ llvm::errs() << "]\n";
+ }
} else {
- T->ConstructJob(C, *JA, Result, InputInfos,
- C.getArgsForToolChain(TC, BoundArch), LinkingOutput);
+ if (UnbundlingResults.empty())
+ T->ConstructJob(
+ C, *JA, Result, InputInfos,
+ C.getArgsForToolChain(TC, BoundArch, JA->getOffloadingDeviceKind()),
+ LinkingOutput);
+ else
+ T->ConstructJobMultipleOutputs(
+ C, *JA, UnbundlingResults, InputInfos,
+ C.getArgsForToolChain(TC, BoundArch, JA->getOffloadingDeviceKind()),
+ LinkingOutput);
}
return Result;
}
@@ -2313,9 +3374,9 @@ static const char *MakeCLOutputFilename(const ArgList &Args, StringRef ArgValue,
const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
const char *BaseInput,
- const char *BoundArch, bool AtTopLevel,
+ StringRef BoundArch, bool AtTopLevel,
bool MultipleArchs,
- StringRef NormalizedTriple) const {
+ StringRef OffloadingPrefix) const {
llvm::PrettyStackTraceString CrashInfo("Computing output path");
// Output to a user requested destination?
if (AtTopLevel && !isa<DsymutilJobAction>(JA) && !isa<VerifyJobAction>(JA)) {
@@ -2360,7 +3421,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
std::pair<StringRef, StringRef> Split = Name.split('.');
std::string TmpName = GetTemporaryPath(
Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
- return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName));
}
SmallString<128> BasePath(BaseInput);
@@ -2375,7 +3436,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
// Determine what the derived output name should be.
const char *NamedOutput;
- if (JA.getType() == types::TY_Object &&
+ if ((JA.getType() == types::TY_Object || JA.getType() == types::TY_LTO_BC) &&
C.getArgs().hasArg(options::OPT__SLASH_Fo, options::OPT__SLASH_o)) {
// The /Fo or /o flag decides the object filename.
StringRef Val =
@@ -2399,17 +3460,17 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
// clang-cl uses BaseName for the executable name.
NamedOutput =
MakeCLOutputFilename(C.getArgs(), "", BaseName, types::TY_Image);
- } else if (MultipleArchs && BoundArch) {
+ } else {
SmallString<128> Output(getDefaultImageName());
- Output += JA.getOffloadingFileNamePrefix(NormalizedTriple);
- Output += "-";
- Output.append(BoundArch);
+ Output += OffloadingPrefix;
+ if (MultipleArchs && !BoundArch.empty()) {
+ Output += "-";
+ Output.append(BoundArch);
+ }
NamedOutput = C.getArgs().MakeArgString(Output.c_str());
- } else {
- NamedOutput = getDefaultImageName();
}
} else if (JA.getType() == types::TY_PCH && IsCLMode()) {
- NamedOutput = C.getArgs().MakeArgString(GetClPchPath(C, BaseName).c_str());
+ NamedOutput = C.getArgs().MakeArgString(GetClPchPath(C, BaseName));
} else {
const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
assert(Suffix && "All types used for output should have a suffix.");
@@ -2418,8 +3479,8 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
if (!types::appendSuffixForType(JA.getType()))
End = BaseName.rfind('.');
SmallString<128> Suffixed(BaseName.substr(0, End));
- Suffixed += JA.getOffloadingFileNamePrefix(NormalizedTriple);
- if (MultipleArchs && BoundArch) {
+ Suffixed += OffloadingPrefix;
+ if (MultipleArchs && !BoundArch.empty()) {
Suffixed += "-";
Suffixed.append(BoundArch);
}
@@ -2459,7 +3520,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
std::pair<StringRef, StringRef> Split = Name.split('.');
std::string TmpName = GetTemporaryPath(
Split.first, types::getTypeTempSuffix(JA.getType(), IsCLMode()));
- return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName));
}
}
@@ -2476,7 +3537,7 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
}
}
-std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
+std::string Driver::GetFilePath(StringRef Name, const ToolChain &TC) const {
// Respect a limited subset of the '-Bprefix' functionality in GCC by
// attempting to use this prefix when looking for file paths.
for (const std::string &Dir : PrefixDirs) {
@@ -2506,16 +3567,16 @@ std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
}
void Driver::generatePrefixedToolNames(
- const char *Tool, const ToolChain &TC,
+ StringRef Tool, const ToolChain &TC,
SmallVectorImpl<std::string> &Names) const {
// FIXME: Needs a better variable than DefaultTargetTriple
- Names.emplace_back(DefaultTargetTriple + "-" + Tool);
+ Names.emplace_back((DefaultTargetTriple + "-" + Tool).str());
Names.emplace_back(Tool);
// Allow the discovery of tools prefixed with LLVM's default target triple.
std::string LLVMDefaultTargetTriple = llvm::sys::getDefaultTargetTriple();
if (LLVMDefaultTargetTriple != DefaultTargetTriple)
- Names.emplace_back(LLVMDefaultTargetTriple + "-" + Tool);
+ Names.emplace_back((LLVMDefaultTargetTriple + "-" + Tool).str());
}
static bool ScanDirForExecutable(SmallString<128> &Dir,
@@ -2529,8 +3590,7 @@ static bool ScanDirForExecutable(SmallString<128> &Dir,
return false;
}
-std::string Driver::GetProgramPath(const char *Name,
- const ToolChain &TC) const {
+std::string Driver::GetProgramPath(StringRef Name, const ToolChain &TC) const {
SmallVector<std::string, 2> TargetSpecificExecutables;
generatePrefixedToolNames(Name, TC, TargetSpecificExecutables);
@@ -2542,7 +3602,7 @@ std::string Driver::GetProgramPath(const char *Name,
if (ScanDirForExecutable(P, TargetSpecificExecutables))
return P.str();
} else {
- SmallString<128> P(PrefixDir + Name);
+ SmallString<128> P((PrefixDir + Name).str());
if (llvm::sys::fs::can_execute(Twine(P)))
return P.str();
}
@@ -2564,8 +3624,7 @@ std::string Driver::GetProgramPath(const char *Name,
return Name;
}
-std::string Driver::GetTemporaryPath(StringRef Prefix,
- const char *Suffix) const {
+std::string Driver::GetTemporaryPath(StringRef Prefix, StringRef Suffix) const {
SmallString<128> Path;
std::error_code EC = llvm::sys::fs::createTemporaryFile(Prefix, Suffix, Path);
if (EC) {
@@ -2645,6 +3704,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::NaCl:
TC = new toolchains::NaClToolChain(*this, Target, Args);
break;
+ case llvm::Triple::Fuchsia:
+ TC = new toolchains::Fuchsia(*this, Target, Args);
+ break;
case llvm::Triple::Solaris:
TC = new toolchains::Solaris(*this, Target, Args);
break;
@@ -2673,12 +3735,12 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
break;
}
break;
- case llvm::Triple::CUDA:
- TC = new toolchains::CudaToolChain(*this, Target, Args);
- break;
case llvm::Triple::PS4:
TC = new toolchains::PS4CPU(*this, Target, Args);
break;
+ case llvm::Triple::Contiki:
+ TC = new toolchains::Contiki(*this, Target, Args);
+ break;
default:
// Of these targets, Hexagon is the only one that might have
// an OS of Linux, in which case it got handled above already.
@@ -2686,6 +3748,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::tce:
TC = new toolchains::TCEToolChain(*this, Target, Args);
break;
+ case llvm::Triple::tcele:
+ TC = new toolchains::TCELEToolChain(*this, Target, Args);
+ break;
case llvm::Triple::hexagon:
TC = new toolchains::HexagonToolChain(*this, Target, Args);
break;
@@ -2711,6 +3776,12 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
}
}
}
+
+ // Intentionally omitted from the switch above: llvm::Triple::CUDA. CUDA
+ // compilations always need two toolchains, the CUDA toolchain and the host
+ // toolchain. So the only valid way to create a CUDA toolchain is via
+ // CreateOffloadingDeviceToolChains.
+
return *TC;
}
@@ -2733,36 +3804,35 @@ bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
///
/// \return True if the entire string was parsed (9.2), or all groups were
/// parsed (10.3.5extrastuff).
-bool Driver::GetReleaseVersion(const char *Str, unsigned &Major,
- unsigned &Minor, unsigned &Micro,
- bool &HadExtra) {
+bool Driver::GetReleaseVersion(StringRef Str, unsigned &Major, unsigned &Minor,
+ unsigned &Micro, bool &HadExtra) {
HadExtra = false;
Major = Minor = Micro = 0;
- if (*Str == '\0')
+ if (Str.empty())
return false;
- char *End;
- Major = (unsigned)strtol(Str, &End, 10);
- if (*Str != '\0' && *End == '\0')
+ if (Str.consumeInteger(10, Major))
+ return false;
+ if (Str.empty())
return true;
- if (*End != '.')
+ if (Str[0] != '.')
return false;
- Str = End + 1;
- Minor = (unsigned)strtol(Str, &End, 10);
- if (*Str != '\0' && *End == '\0')
+ Str = Str.drop_front(1);
+
+ if (Str.consumeInteger(10, Minor))
+ return false;
+ if (Str.empty())
return true;
- if (*End != '.')
+ if (Str[0] != '.')
return false;
+ Str = Str.drop_front(1);
- Str = End + 1;
- Micro = (unsigned)strtol(Str, &End, 10);
- if (*Str != '\0' && *End == '\0')
- return true;
- if (Str == End)
+ if (Str.consumeInteger(10, Micro))
return false;
- HadExtra = true;
+ if (!Str.empty())
+ HadExtra = true;
return true;
}
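A usage sketch for the StringRef overload above, reusing the examples from its doc comment (expected results are read off the parsing logic):

    unsigned Major, Minor, Micro;
    bool HadExtra;
    Driver::GetReleaseVersion("9.2", Major, Minor, Micro, HadExtra);
    // true; Major == 9, Minor == 2, Micro == 0, HadExtra == false
    Driver::GetReleaseVersion("10.3.5extrastuff", Major, Minor, Micro, HadExtra);
    // true; Major == 10, Minor == 3, Micro == 5, HadExtra == true
    Driver::GetReleaseVersion("x.1", Major, Minor, Micro, HadExtra);
    // false; the leading component is not an integer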
@@ -2772,21 +3842,22 @@ bool Driver::GetReleaseVersion(const char *Str, unsigned &Major,
///
/// \return True if the entire string was parsed and there are
/// no extra characters remaining at the end.
-bool Driver::GetReleaseVersion(const char *Str,
+bool Driver::GetReleaseVersion(StringRef Str,
MutableArrayRef<unsigned> Digits) {
- if (*Str == '\0')
+ if (Str.empty())
return false;
- char *End;
unsigned CurDigit = 0;
while (CurDigit < Digits.size()) {
- unsigned Digit = (unsigned)strtol(Str, &End, 10);
+ unsigned Digit;
+ if (Str.consumeInteger(10, Digit))
+ return false;
Digits[CurDigit] = Digit;
- if (*Str != '\0' && *End == '\0')
+ if (Str.empty())
return true;
- if (*End != '.' || Str == End)
+ if (Str[0] != '.')
return false;
- Str = End + 1;
+ Str = Str.drop_front(1);
CurDigit++;
}
diff --git a/lib/Driver/Job.cpp b/lib/Driver/Job.cpp
index 2d99b1f22385..9fd8808af302 100644
--- a/lib/Driver/Job.cpp
+++ b/lib/Driver/Job.cpp
@@ -7,18 +7,20 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Driver/Job.h"
#include "InputInfo.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
-#include "clang/Driver/Job.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/Program.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
@@ -37,50 +39,62 @@ Command::Command(const Action &Source, const Tool &Creator,
InputFilenames.push_back(II.getFilename());
}
-static int skipArgs(const char *Flag, bool HaveCrashVFS) {
+/// @brief Check whether the compiler flag in question should be skipped when
+/// emitting a reproducer. Also track how many arguments the flag takes and
+/// whether the option is some kind of include path.
+static bool skipArgs(const char *Flag, bool HaveCrashVFS, int &SkipNum,
+ bool &IsInclude) {
+ SkipNum = 2;
// These flags are all of the form -Flag <Arg> and are treated as two
// arguments. Therefore, we need to skip the flag and the next argument.
- bool Res = llvm::StringSwitch<bool>(Flag)
+ bool ShouldSkip = llvm::StringSwitch<bool>(Flag)
.Cases("-MF", "-MT", "-MQ", "-serialize-diagnostic-file", true)
.Cases("-o", "-coverage-file", "-dependency-file", true)
- .Cases("-fdebug-compilation-dir", "-idirafter", true)
- .Cases("-include", "-include-pch", "-internal-isystem", true)
- .Cases("-internal-externc-isystem", "-iprefix", "-iwithprefix", true)
- .Cases("-iwithprefixbefore", "-isystem", "-iquote", true)
+ .Cases("-fdebug-compilation-dir", "-diagnostic-log-file", true)
.Cases("-dwarf-debug-flags", "-ivfsoverlay", true)
- .Cases("-header-include-file", "-diagnostic-log-file", true)
- // Some include flags shouldn't be skipped if we have a crash VFS
- .Cases("-isysroot", "-I", "-F", "-resource-dir", !HaveCrashVFS)
.Default(false);
-
- // Match found.
- if (Res)
- return 2;
+ if (ShouldSkip)
+ return true;
+
+ // Some include flags shouldn't be skipped if we have a crash VFS
+ IsInclude = llvm::StringSwitch<bool>(Flag)
+ .Cases("-include", "-header-include-file", true)
+ .Cases("-idirafter", "-internal-isystem", "-iwithprefix", true)
+ .Cases("-internal-externc-isystem", "-iprefix", true)
+ .Cases("-iwithprefixbefore", "-isystem", "-iquote", true)
+ .Cases("-isysroot", "-I", "-F", "-resource-dir", true)
+ .Cases("-iframework", "-include-pch", true)
+ .Default(false);
+ if (IsInclude)
+ return !HaveCrashVFS;
// The remaining flags are treated as a single argument.
// These flags are all of the form -Flag and have no second argument.
- Res = llvm::StringSwitch<bool>(Flag)
+ ShouldSkip = llvm::StringSwitch<bool>(Flag)
.Cases("-M", "-MM", "-MG", "-MP", "-MD", true)
.Case("-MMD", true)
.Default(false);
// Match found.
- if (Res)
- return 1;
+ SkipNum = 1;
+ if (ShouldSkip)
+ return true;
// These flags are treated as a single argument (e.g., -F<Dir>).
StringRef FlagRef(Flag);
- if ((!HaveCrashVFS &&
- (FlagRef.startswith("-F") || FlagRef.startswith("-I"))) ||
- FlagRef.startswith("-fmodules-cache-path="))
- return 1;
-
- return 0;
+ IsInclude = FlagRef.startswith("-F") || FlagRef.startswith("-I");
+ if (IsInclude)
+ return !HaveCrashVFS;
+ if (FlagRef.startswith("-fmodules-cache-path="))
+ return true;
+
+ SkipNum = 0;
+ return false;
}
-void Command::printArg(raw_ostream &OS, const char *Arg, bool Quote) {
- const bool Escape = std::strpbrk(Arg, "\"\\$");
+void Command::printArg(raw_ostream &OS, StringRef Arg, bool Quote) {
+ const bool Escape = Arg.find_first_of("\"\\$") != StringRef::npos;
if (!Quote && !Escape) {
OS << Arg;
@@ -89,7 +103,7 @@ void Command::printArg(raw_ostream &OS, const char *Arg, bool Quote) {
// Quote and escape. This isn't really complete, but good enough.
OS << '"';
- while (const char c = *Arg++) {
+ for (const char c : Arg) {
if (c == '"' || c == '\\' || c == '$')
OS << '\\';
OS << c;
@@ -152,6 +166,45 @@ void Command::buildArgvForResponseFile(
}
}
+/// @brief Rewrite relative include-like flag paths to absolute ones.
+static void
+rewriteIncludes(const llvm::ArrayRef<const char *> &Args, size_t Idx,
+ size_t NumArgs,
+ llvm::SmallVectorImpl<llvm::SmallString<128>> &IncFlags) {
+ using namespace llvm;
+ using namespace sys;
+ auto getAbsPath = [](StringRef InInc, SmallVectorImpl<char> &OutInc) -> bool {
+ if (path::is_absolute(InInc)) // Nothing to do here...
+ return false;
+ std::error_code EC = fs::current_path(OutInc);
+ if (EC)
+ return false;
+ path::append(OutInc, InInc);
+ return true;
+ };
+
+ SmallString<128> NewInc;
+ if (NumArgs == 1) {
+ StringRef FlagRef(Args[Idx + NumArgs - 1]);
+ assert((FlagRef.startswith("-F") || FlagRef.startswith("-I")) &&
+ "Expecting -I or -F");
+ StringRef Inc = FlagRef.slice(2, StringRef::npos);
+ if (getAbsPath(Inc, NewInc)) {
+ SmallString<128> NewArg(FlagRef.slice(0, 2));
+ NewArg += NewInc;
+ IncFlags.push_back(std::move(NewArg));
+ }
+ return;
+ }
+
+ assert(NumArgs == 2 && "Not expecting more than two arguments");
+ StringRef Inc(Args[Idx + NumArgs - 1]);
+ if (!getAbsPath(Inc, NewInc))
+ return;
+ IncFlags.push_back(SmallString<128>(Args[Idx]));
+ IncFlags.push_back(std::move(NewInc));
+}
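A sketch of what this rewriting produces (paths are hypothetical and assume the current working directory is /work):

    // Joined single-argument form:
    //   "-Iinclude"        -> IncFlags == { "-I/work/include" }
    // Separate two-argument form:
    //   "-isystem", "inc"  -> IncFlags == { "-isystem", "/work/inc" }
    // Already-absolute paths, e.g. "-I/opt/inc", leave IncFlags empty and
    // the caller prints the original argument unchanged.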
+
void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
CrashReportInfo *CrashInfo) const {
// Always quote the exe.
@@ -170,10 +223,27 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
const char *const Arg = Args[i];
if (CrashInfo) {
- if (int Skip = skipArgs(Arg, HaveCrashVFS)) {
- i += Skip - 1;
+ int NumArgs = 0;
+ bool IsInclude = false;
+ if (skipArgs(Arg, HaveCrashVFS, NumArgs, IsInclude)) {
+ i += NumArgs - 1;
continue;
}
+
+ // Relative includes need to be expanded to absolute paths.
+ if (HaveCrashVFS && IsInclude) {
+ SmallVector<SmallString<128>, 2> NewIncFlags;
+ rewriteIncludes(Args, i, NumArgs, NewIncFlags);
+ if (!NewIncFlags.empty()) {
+ for (auto &F : NewIncFlags) {
+ OS << ' ';
+ printArg(OS, F.c_str(), Quote);
+ }
+ i += NumArgs - 1;
+ continue;
+ }
+ }
+
auto Found = std::find_if(InputFilenames.begin(), InputFilenames.end(),
[&Arg](StringRef IF) { return IF == Arg; });
if (Found != InputFilenames.end() &&
@@ -181,7 +251,7 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
// Replace the input file name with the crashinfo's file name.
OS << ' ';
StringRef ShortName = llvm::sys::path::filename(CrashInfo->Filename);
- printArg(OS, ShortName.str().c_str(), Quote);
+ printArg(OS, ShortName.str(), Quote);
continue;
}
}
@@ -194,19 +264,22 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
OS << ' ';
printArg(OS, "-ivfsoverlay", Quote);
OS << ' ';
- printArg(OS, CrashInfo->VFSPath.str().c_str(), Quote);
+ printArg(OS, CrashInfo->VFSPath.str(), Quote);
- // Insert -fmodules-cache-path and use the relative module directory
- // <name>.cache/vfs/modules where we already dumped the modules.
+ // The leftover modules from the crash are stored in
+ // <name>.cache/vfs/modules
+ // Leave them untouched for PCM inspection, and provide a clean/empty
+ // directory to contain any module cache generated in the future:
+ // <name>.cache/vfs/repro-modules
SmallString<128> RelModCacheDir = llvm::sys::path::parent_path(
llvm::sys::path::parent_path(CrashInfo->VFSPath));
- llvm::sys::path::append(RelModCacheDir, "modules");
+ llvm::sys::path::append(RelModCacheDir, "repro-modules");
std::string ModCachePath = "-fmodules-cache-path=";
ModCachePath.append(RelModCacheDir.c_str());
OS << ' ';
- printArg(OS, ModCachePath.c_str(), Quote);
+ printArg(OS, ModCachePath, Quote);
}
if (ResponseFile != nullptr) {
diff --git a/lib/Driver/MSVCToolChain.cpp b/lib/Driver/MSVCToolChain.cpp
index b8de5ad49182..95cf056f7a74 100644
--- a/lib/Driver/MSVCToolChain.cpp
+++ b/lib/Driver/MSVCToolChain.cpp
@@ -16,12 +16,14 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include <cstdio>
@@ -113,6 +115,9 @@ static bool readFullStringValue(HKEY hkey, const char *valueName,
if (result == ERROR_SUCCESS) {
std::wstring WideValue(reinterpret_cast<const wchar_t *>(buffer.data()),
valueSize / sizeof(wchar_t));
+ if (valueSize && WideValue.back() == L'\0') {
+ WideValue.pop_back();
+ }
// The destination buffer must be empty as an invariant of the conversion
// function; but this function is sometimes called in a loop that passes in
// the same buffer, however. Simply clear it out so we can overwrite it.
@@ -190,8 +195,7 @@ static bool getSystemRegistryString(const char *keyPath, const char *valueName,
lResult = RegOpenKeyExA(hTopKey, bestName.c_str(), 0,
KEY_READ | KEY_WOW64_32KEY, &hKey);
if (lResult == ERROR_SUCCESS) {
- lResult = readFullStringValue(hKey, valueName, value);
- if (lResult == ERROR_SUCCESS) {
+ if (readFullStringValue(hKey, valueName, value)) {
bestValue = dvalue;
if (phValue)
*phValue = bestName;
@@ -208,8 +212,7 @@ static bool getSystemRegistryString(const char *keyPath, const char *valueName,
lResult =
RegOpenKeyExA(hRootKey, keyPath, 0, KEY_READ | KEY_WOW64_32KEY, &hKey);
if (lResult == ERROR_SUCCESS) {
- lResult = readFullStringValue(hKey, valueName, value);
- if (lResult == ERROR_SUCCESS)
+ if (readFullStringValue(hKey, valueName, value))
returnValue = true;
if (phValue)
phValue->clear();
@@ -470,6 +473,14 @@ bool MSVCToolChain::getVisualStudioBinariesFolder(const char *clangProgramPath,
return true;
}
+VersionTuple MSVCToolChain::getMSVCVersionFromTriple() const {
+ unsigned Major, Minor, Micro;
+ getTriple().getEnvironmentVersion(Major, Minor, Micro);
+ if (Major || Minor || Micro)
+ return VersionTuple(Major, Minor, Micro);
+ return VersionTuple();
+}
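For illustration, assuming llvm::Triple::getEnvironmentVersion extracts the numeric suffix of the environment component (the triples are hypothetical):

    // "x86_64-pc-windows-msvc19.0" -> VersionTuple(19, 0, 0)
    // "x86_64-pc-windows-msvc"     -> VersionTuple()   (empty; no version)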
+
VersionTuple MSVCToolChain::getMSVCVersionFromExe() const {
VersionTuple Version;
#ifdef USE_WIN32
@@ -512,9 +523,9 @@ VersionTuple MSVCToolChain::getMSVCVersionFromExe() const {
// Get Visual Studio installation directory.
bool MSVCToolChain::getVisualStudioInstallDir(std::string &path) const {
// First check the environment variables that vsvars32.bat sets.
- const char *vcinstalldir = getenv("VCINSTALLDIR");
- if (vcinstalldir) {
- path = vcinstalldir;
+ if (llvm::Optional<std::string> VcInstallDir =
+ llvm::sys::Process::GetEnv("VCINSTALLDIR")) {
+ path = std::move(*VcInstallDir);
path = path.substr(0, path.find("\\VC"));
return true;
}
@@ -540,26 +551,26 @@ bool MSVCToolChain::getVisualStudioInstallDir(std::string &path) const {
}
// Try the environment.
- const char *vs120comntools = getenv("VS120COMNTOOLS");
- const char *vs100comntools = getenv("VS100COMNTOOLS");
- const char *vs90comntools = getenv("VS90COMNTOOLS");
- const char *vs80comntools = getenv("VS80COMNTOOLS");
-
- const char *vscomntools = nullptr;
-
- // Find any version we can
- if (vs120comntools)
- vscomntools = vs120comntools;
- else if (vs100comntools)
- vscomntools = vs100comntools;
- else if (vs90comntools)
- vscomntools = vs90comntools;
- else if (vs80comntools)
- vscomntools = vs80comntools;
-
- if (vscomntools && *vscomntools) {
- const char *p = strstr(vscomntools, "\\Common7\\Tools");
- path = p ? std::string(vscomntools, p) : vscomntools;
+ std::string vcomntools;
+ if (llvm::Optional<std::string> vs120comntools =
+ llvm::sys::Process::GetEnv("VS120COMNTOOLS"))
+ vcomntools = std::move(*vs120comntools);
+ else if (llvm::Optional<std::string> vs100comntools =
+ llvm::sys::Process::GetEnv("VS100COMNTOOLS"))
+ vcomntools = std::move(*vs100comntools);
+ else if (llvm::Optional<std::string> vs90comntools =
+ llvm::sys::Process::GetEnv("VS90COMNTOOLS"))
+ vcomntools = std::move(*vs90comntools);
+ else if (llvm::Optional<std::string> vs80comntools =
+ llvm::sys::Process::GetEnv("VS80COMNTOOLS"))
+ vcomntools = std::move(*vs80comntools);
+
+ // Find any version we can.
+ if (!vcomntools.empty()) {
+ size_t p = vcomntools.find("\\Common7\\Tools");
+ if (p != std::string::npos)
+ vcomntools.resize(p);
+ path = std::move(vcomntools);
return true;
}
return false;
@@ -592,9 +603,10 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
// Honor %INCLUDE%. It should know essential search paths with vcvarsall.bat.
- if (const char *cl_include_dir = getenv("INCLUDE")) {
+ if (llvm::Optional<std::string> cl_include_dir =
+ llvm::sys::Process::GetEnv("INCLUDE")) {
SmallVector<StringRef, 8> Dirs;
- StringRef(cl_include_dir)
+ StringRef(*cl_include_dir)
.split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
for (StringRef Dir : Dirs)
addSystemInclude(DriverArgs, CC1Args, Dir);
@@ -646,6 +658,7 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
}
+#if defined(LLVM_ON_WIN32)
// As a fallback, select default install paths.
// FIXME: Don't guess drives and paths like this on Windows.
const StringRef Paths[] = {
@@ -656,6 +669,7 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
"C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
};
addSystemIncludes(DriverArgs, CC1Args, Paths);
+#endif
}
void MSVCToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
@@ -663,21 +677,34 @@ void MSVCToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
// FIXME: There should probably be logic here to find libc++ on Windows.
}
+VersionTuple MSVCToolChain::computeMSVCVersion(const Driver *D,
+ const ArgList &Args) const {
+ bool IsWindowsMSVC = getTriple().isWindowsMSVCEnvironment();
+ VersionTuple MSVT = ToolChain::computeMSVCVersion(D, Args);
+ if (MSVT.empty()) MSVT = getMSVCVersionFromTriple();
+ if (MSVT.empty() && IsWindowsMSVC) MSVT = getMSVCVersionFromExe();
+ if (MSVT.empty() &&
+ Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ IsWindowsMSVC)) {
+ // -fms-compatibility-version=18.00 is the default.
+ // FIXME: Consider bumping this to 19 (MSVC2015) soon.
+ MSVT = VersionTuple(18);
+ }
+ return MSVT;
+}
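The precedence, read off the checks above:

    // 1. -fmsc-version= / -fms-compatibility-version= (the base
    //    ToolChain::computeMSVCVersion implementation).
    // 2. A version encoded in the target triple, e.g. *-windows-msvc19.0.
    // 3. The installed cl.exe, when targeting the MSVC environment.
    // 4. 18.00 as a last resort when MS extensions are (or default to) on.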
+
std::string
MSVCToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType) const {
- std::string TripleStr =
- ToolChain::ComputeEffectiveClangTriple(Args, InputType);
- llvm::Triple Triple(TripleStr);
- VersionTuple MSVT =
- tools::visualstudio::getMSVCVersion(/*D=*/nullptr, *this, Triple, Args,
- /*IsWindowsMSVC=*/true);
- if (MSVT.empty())
- return TripleStr;
-
+ // Computing the MSVC version doesn't depend on the architecture, even
+ // though the computation may look at the triple internally.
+ VersionTuple MSVT = computeMSVCVersion(/*D=*/nullptr, Args);
MSVT = VersionTuple(MSVT.getMajor(), MSVT.getMinor().getValueOr(0),
MSVT.getSubminor().getValueOr(0));
+ // For the rest of the triple, however, a computed architecture name may
+ // be needed.
+ llvm::Triple Triple(ToolChain::ComputeEffectiveClangTriple(Args, InputType));
if (Triple.getEnvironment() == llvm::Triple::MSVC) {
StringRef ObjFmt = Triple.getEnvironmentName().split('-').second;
if (ObjFmt.empty())
@@ -806,7 +833,7 @@ static void TranslateDArg(Arg *A, llvm::opt::DerivedArgList &DAL,
llvm::opt::DerivedArgList *
MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
- const char *BoundArch) const {
+ StringRef BoundArch, Action::OffloadKind) const {
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
const OptTable &Opts = getDriver().getOpts();
diff --git a/lib/Driver/Multilib.cpp b/lib/Driver/Multilib.cpp
index 34ad6a7efb24..a88edf7f04e3 100644
--- a/lib/Driver/Multilib.cpp
+++ b/lib/Driver/Multilib.cpp
@@ -13,12 +13,10 @@
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Option/Option.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Regex.h"
#include "llvm/Support/YAMLParser.h"
diff --git a/lib/Driver/SanitizerArgs.cpp b/lib/Driver/SanitizerArgs.cpp
index 30cc3f45c9e0..f4f6dad9f287 100644
--- a/lib/Driver/SanitizerArgs.cpp
+++ b/lib/Driver/SanitizerArgs.cpp
@@ -49,8 +49,11 @@ enum CoverageFeature {
CoverageIndirCall = 1 << 3,
CoverageTraceBB = 1 << 4,
CoverageTraceCmp = 1 << 5,
- Coverage8bitCounters = 1 << 6,
- CoverageTracePC = 1 << 7,
+ CoverageTraceDiv = 1 << 6,
+ CoverageTraceGep = 1 << 7,
+ Coverage8bitCounters = 1 << 8,
+ CoverageTracePC = 1 << 9,
+ CoverageTracePCGuard = 1 << 10,
};
/// Parse a -fsanitize= or -fno-sanitize= argument's values, diagnosing any
@@ -162,7 +165,8 @@ bool SanitizerArgs::needsUbsanRt() const {
return ((Sanitizers.Mask & NeedsUbsanRt & ~TrapSanitizers.Mask) ||
CoverageFeatures) &&
!Sanitizers.has(Address) && !Sanitizers.has(Memory) &&
- !Sanitizers.has(Thread) && !Sanitizers.has(DataFlow) && !CfiCrossDso;
+ !Sanitizers.has(Thread) && !Sanitizers.has(DataFlow) &&
+ !Sanitizers.has(Leak) && !CfiCrossDso;
}
bool SanitizerArgs::needsCfiRt() const {
@@ -434,6 +438,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
TC.getTriple().getArch() == llvm::Triple::x86_64);
}
+ if (AllAddedKinds & Thread) {
+ TsanMemoryAccess = Args.hasFlag(options::OPT_fsanitize_thread_memory_access,
+ options::OPT_fno_sanitize_thread_memory_access,
+ TsanMemoryAccess);
+ TsanFuncEntryExit = Args.hasFlag(options::OPT_fsanitize_thread_func_entry_exit,
+ options::OPT_fno_sanitize_thread_func_entry_exit,
+ TsanFuncEntryExit);
+ TsanAtomics = Args.hasFlag(options::OPT_fsanitize_thread_atomics,
+ options::OPT_fno_sanitize_thread_atomics,
+ TsanAtomics);
+ }
+
if (AllAddedKinds & CFI) {
CfiCrossDso = Args.hasFlag(options::OPT_fsanitize_cfi_cross_dso,
options::OPT_fno_sanitize_cfi_cross_dso, false);
@@ -524,7 +540,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
<< "-fsanitize-coverage=8bit-counters"
<< "-fsanitize-coverage=(func|bb|edge)";
// trace-pc w/o func/bb/edge implies edge.
- if ((CoverageFeatures & CoverageTracePC) &&
+ if ((CoverageFeatures & (CoverageTracePC | CoverageTracePCGuard)) &&
!(CoverageFeatures & CoverageTypes))
CoverageFeatures |= CoverageEdge;
@@ -556,14 +572,13 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
D.Diag(clang::diag::note_drv_address_sanitizer_debug_runtime);
}
}
- }
- AsanUseAfterScope =
- Args.hasArg(options::OPT_fsanitize_address_use_after_scope);
- if (AsanUseAfterScope && !(AllAddedKinds & Address)) {
- D.Diag(clang::diag::err_drv_argument_only_allowed_with)
- << "-fsanitize-address-use-after-scope"
- << "-fsanitize=address";
+ if (Arg *A = Args.getLastArg(
+ options::OPT_fsanitize_address_use_after_scope,
+ options::OPT_fno_sanitize_address_use_after_scope)) {
+ AsanUseAfterScope = A->getOption().getID() ==
+ options::OPT_fsanitize_address_use_after_scope;
+ }
}
// Parse -link-cxx-sanitizer flag.
@@ -605,6 +620,12 @@ static void addIncludeLinkerOption(const ToolChain &TC,
void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
types::ID InputType) const {
+ // NVPTX doesn't currently support sanitizers. Bailing out here means that
+ // e.g. -fsanitize=address applies only to host code, which is what we want
+ // for now.
+ if (TC.getTriple().isNVPTX())
+ return;
+
// Translate available CoverageFeatures to corresponding clang-cc1 flags.
// Do it even if Sanitizers.empty() since some forms of coverage don't require
// sanitizers.
@@ -615,8 +636,11 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
std::make_pair(CoverageIndirCall, "-fsanitize-coverage-indirect-calls"),
std::make_pair(CoverageTraceBB, "-fsanitize-coverage-trace-bb"),
std::make_pair(CoverageTraceCmp, "-fsanitize-coverage-trace-cmp"),
+ std::make_pair(CoverageTraceDiv, "-fsanitize-coverage-trace-div"),
+ std::make_pair(CoverageTraceGep, "-fsanitize-coverage-trace-gep"),
std::make_pair(Coverage8bitCounters, "-fsanitize-coverage-8bit-counters"),
- std::make_pair(CoverageTracePC, "-fsanitize-coverage-trace-pc")};
+ std::make_pair(CoverageTracePC, "-fsanitize-coverage-trace-pc"),
+ std::make_pair(CoverageTracePCGuard, "-fsanitize-coverage-trace-pc-guard")};
for (auto F : CoverageFlags) {
if (CoverageFeatures & F.first)
CmdArgs.push_back(Args.MakeArgString(F.second));
@@ -674,6 +698,22 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
if (MsanUseAfterDtor)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-use-after-dtor"));
+ // FIXME: Pass these parameters as function attributes, not as -llvm flags.
+ if (!TsanMemoryAccess) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-tsan-instrument-memory-accesses=0");
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-tsan-instrument-memintrinsics=0");
+ }
+ if (!TsanFuncEntryExit) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-tsan-instrument-func-entry-exit=0");
+ }
+ if (!TsanAtomics) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-tsan-instrument-atomics=0");
+ }
+
if (CfiCrossDso)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-cfi-cross-dso"));
@@ -752,8 +792,11 @@ int parseCoverageFeatures(const Driver &D, const llvm::opt::Arg *A) {
.Case("indirect-calls", CoverageIndirCall)
.Case("trace-bb", CoverageTraceBB)
.Case("trace-cmp", CoverageTraceCmp)
+ .Case("trace-div", CoverageTraceDiv)
+ .Case("trace-gep", CoverageTraceGep)
.Case("8bit-counters", Coverage8bitCounters)
.Case("trace-pc", CoverageTracePC)
+ .Case("trace-pc-guard", CoverageTracePCGuard)
.Default(0);
if (F == 0)
D.Diag(clang::diag::err_drv_unsupported_option_argument)
diff --git a/lib/Driver/Tool.cpp b/lib/Driver/Tool.cpp
index 7142e822f16e..818494662179 100644
--- a/lib/Driver/Tool.cpp
+++ b/lib/Driver/Tool.cpp
@@ -8,6 +8,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Tool.h"
+#include "InputInfo.h"
using namespace clang::driver;
@@ -21,3 +22,12 @@ Tool::Tool(const char *_Name, const char *_ShortName, const ToolChain &TC,
Tool::~Tool() {
}
+
+void Tool::ConstructJobMultipleOutputs(Compilation &C, const JobAction &JA,
+ const InputInfoList &Outputs,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const {
+ assert(Outputs.size() == 1 && "Expected only one output by default!");
+ ConstructJob(C, JA, Outputs.front(), Inputs, TCArgs, LinkingOutput);
+}
diff --git a/lib/Driver/ToolChain.cpp b/lib/Driver/ToolChain.cpp
index e96688cbaf81..6adc0386ee7b 100644
--- a/lib/Driver/ToolChain.cpp
+++ b/lib/Driver/ToolChain.cpp
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Driver/ToolChain.h"
#include "Tools.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Config/config.h"
@@ -15,16 +16,15 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
-#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/TargetRegistry.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -68,7 +68,8 @@ static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
- CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
+ CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)),
+ EffectiveTriple() {
if (Arg *A = Args.getLastArg(options::OPT_mthread_model))
if (!isThreadModelSupported(A->getValue()))
D.Diag(diag::err_drv_invalid_thread_model_for_target)
@@ -238,6 +239,12 @@ Tool *ToolChain::getLink() const {
return Link.get();
}
+Tool *ToolChain::getOffloadBundler() const {
+ if (!OffloadBundler)
+ OffloadBundler.reset(new tools::OffloadBundler(*this));
+ return OffloadBundler.get();
+}
+
Tool *ToolChain::getTool(Action::ActionClass AC) const {
switch (AC) {
case Action::AssembleJobClass:
@@ -262,6 +269,10 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::VerifyPCHJobClass:
case Action::BackendJobClass:
return getClang();
+
+ case Action::OffloadBundlingJobClass:
+ case Action::OffloadUnbundlingJobClass:
+ return getOffloadBundler();
}
llvm_unreachable("Invalid tool kind.");
@@ -340,36 +351,34 @@ std::string ToolChain::GetProgramPath(const char *Name) const {
}
std::string ToolChain::GetLinkerPath() const {
- if (Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ)) {
- StringRef UseLinker = A->getValue();
-
- if (llvm::sys::path::is_absolute(UseLinker)) {
- // If we're passed -fuse-ld= with what looks like an absolute path,
- // don't attempt to second-guess that.
- if (llvm::sys::fs::exists(UseLinker))
- return UseLinker;
- } else {
- // If we're passed -fuse-ld= with no argument, or with the argument ld,
- // then use whatever the default system linker is.
- if (UseLinker.empty() || UseLinker == "ld")
- return GetProgramPath("ld");
-
- llvm::SmallString<8> LinkerName("ld.");
- LinkerName.append(UseLinker);
-
- std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
- if (llvm::sys::fs::exists(LinkerPath))
- return LinkerPath;
- }
+ const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ);
+ StringRef UseLinker = A ? A->getValue() : CLANG_DEFAULT_LINKER;
+
+ if (llvm::sys::path::is_absolute(UseLinker)) {
+ // If we're passed what looks like an absolute path, don't attempt to
+ // second-guess that.
+ if (llvm::sys::fs::exists(UseLinker))
+ return UseLinker;
+ } else if (UseLinker.empty() || UseLinker == "ld") {
+ // If we're passed -fuse-ld= with no argument, or with the argument ld,
+ // then use whatever the default system linker is.
+ return GetProgramPath(getDefaultLinker());
+ } else {
+ llvm::SmallString<8> LinkerName("ld.");
+ LinkerName.append(UseLinker);
+
+ std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
+ if (llvm::sys::fs::exists(LinkerPath))
+ return LinkerPath;
+ }
+ if (A)
getDriver().Diag(diag::err_drv_invalid_linker_name) << A->getAsString(Args);
- return "";
- }
- return GetProgramPath(DefaultLinker);
+ return GetProgramPath(getDefaultLinker());
}
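A few hypothetical -fuse-ld= values and what the logic above resolves them to (the linker names are illustrative):

    // -fuse-ld=/custom/ld  -> used verbatim if the file exists
    // -fuse-ld= or =ld     -> GetProgramPath(getDefaultLinker())
    // -fuse-ld=gold        -> GetProgramPath("ld.gold"), with a diagnostic
    //                         if no such program can be found
    // no -fuse-ld= at all  -> CLANG_DEFAULT_LINKER decides, and failure is
    //                         not diagnosed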
-types::ID ToolChain::LookupTypeForExtension(const char *Ext) const {
+types::ID ToolChain::LookupTypeForExtension(StringRef Ext) const {
return types::lookupTypeForExtension(Ext);
}
@@ -487,8 +496,10 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
ArchName = "arm";
// Assembly files should start in ARM mode, unless arch is M-profile.
+ // Windows is always thumb.
if ((InputType != types::TY_PP_Asm && Args.hasFlag(options::OPT_mthumb,
- options::OPT_mno_thumb, ThumbDefault)) || IsMProfile) {
+ options::OPT_mno_thumb, ThumbDefault)) || IsMProfile ||
+ getTriple().isOSWindows()) {
if (IsBigEndian)
ArchName = "thumbeb";
else
@@ -526,54 +537,39 @@ void ToolChain::addProfileRTLibs(const llvm::opt::ArgList &Args,
ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
const ArgList &Args) const {
- if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
- StringRef Value = A->getValue();
- if (Value == "compiler-rt")
- return ToolChain::RLT_CompilerRT;
- if (Value == "libgcc")
- return ToolChain::RLT_Libgcc;
- getDriver().Diag(diag::err_drv_invalid_rtlib_name)
- << A->getAsString(Args);
- }
+ const Arg *A = Args.getLastArg(options::OPT_rtlib_EQ);
+ StringRef LibName = A ? A->getValue() : CLANG_DEFAULT_RTLIB;
- return GetDefaultRuntimeLibType();
-}
+ // Only use "platform" in tests to override CLANG_DEFAULT_RTLIB!
+ if (LibName == "compiler-rt")
+ return ToolChain::RLT_CompilerRT;
+ else if (LibName == "libgcc")
+ return ToolChain::RLT_Libgcc;
+ else if (LibName == "platform")
+ return GetDefaultRuntimeLibType();
-static bool ParseCXXStdlibType(const StringRef& Name,
- ToolChain::CXXStdlibType& Type) {
- if (Name == "libc++")
- Type = ToolChain::CST_Libcxx;
- else if (Name == "libstdc++")
- Type = ToolChain::CST_Libstdcxx;
- else
- return false;
+ if (A)
+ getDriver().Diag(diag::err_drv_invalid_rtlib_name) << A->getAsString(Args);
- return true;
+ return GetDefaultRuntimeLibType();
}
ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
- ToolChain::CXXStdlibType Type;
- bool HasValidType = false;
- bool ForcePlatformDefault = false;
-
const Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
- if (A) {
- StringRef Value = A->getValue();
- HasValidType = ParseCXXStdlibType(Value, Type);
-
- // Only use in tests to override CLANG_DEFAULT_CXX_STDLIB!
- if (Value == "platform")
- ForcePlatformDefault = true;
- else if (!HasValidType)
- getDriver().Diag(diag::err_drv_invalid_stdlib_name)
- << A->getAsString(Args);
- }
+ StringRef LibName = A ? A->getValue() : CLANG_DEFAULT_CXX_STDLIB;
+
+ // Only use "platform" in tests to override CLANG_DEFAULT_CXX_STDLIB!
+ if (LibName == "libc++")
+ return ToolChain::CST_Libcxx;
+ else if (LibName == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ else if (LibName == "platform")
+ return GetDefaultCXXStdlibType();
- if (!HasValidType && (ForcePlatformDefault ||
- !ParseCXXStdlibType(CLANG_DEFAULT_CXX_STDLIB, Type)))
- Type = GetDefaultCXXStdlibType();
+ if (A)
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
- return Type;
+ return GetDefaultCXXStdlibType();
}
/// \brief Utility function to add a system include directory to CC1 arguments.
@@ -688,7 +684,11 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
SanitizerMask Res = (Undefined & ~Vptr & ~Function) | (CFI & ~CFIICall) |
CFICastStrict | UnsignedIntegerOverflow | LocalBounds;
if (getTriple().getArch() == llvm::Triple::x86 ||
- getTriple().getArch() == llvm::Triple::x86_64)
+ getTriple().getArch() == llvm::Triple::x86_64 ||
+ getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::aarch64 ||
+ getTriple().getArch() == llvm::Triple::wasm32 ||
+ getTriple().getArch() == llvm::Triple::wasm64)
Res |= CFIICall;
return Res;
}
@@ -698,3 +698,57 @@ void ToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
void ToolChain::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
+
+static VersionTuple separateMSVCFullVersion(unsigned Version) {
+ if (Version < 100)
+ return VersionTuple(Version);
+
+ if (Version < 10000)
+ return VersionTuple(Version / 100, Version % 100);
+
+ unsigned Build = 0, Factor = 1;
+ for (; Version > 10000; Version = Version / 10, Factor = Factor * 10)
+ Build = Build + (Version % 10) * Factor;
+ return VersionTuple(Version / 100, Version % 100, Build);
+}
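Worked examples for the digit splitting above; the inputs mirror _MSC_VER / _MSC_FULL_VER style encodings:

    // separateMSVCFullVersion(18)        -> 18
    // separateMSVCFullVersion(1800)      -> 18.0
    // separateMSVCFullVersion(180012345) -> 18.0.12345
    //   (digits beyond the first four are peeled off one at a time into
    //    the build number)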
+
+VersionTuple
+ToolChain::computeMSVCVersion(const Driver *D,
+ const llvm::opt::ArgList &Args) const {
+ const Arg *MSCVersion = Args.getLastArg(options::OPT_fmsc_version);
+ const Arg *MSCompatibilityVersion =
+ Args.getLastArg(options::OPT_fms_compatibility_version);
+
+ if (MSCVersion && MSCompatibilityVersion) {
+ if (D)
+ D->Diag(diag::err_drv_argument_not_allowed_with)
+ << MSCVersion->getAsString(Args)
+ << MSCompatibilityVersion->getAsString(Args);
+ return VersionTuple();
+ }
+
+ if (MSCompatibilityVersion) {
+ VersionTuple MSVT;
+ if (MSVT.tryParse(MSCompatibilityVersion->getValue())) {
+ if (D)
+ D->Diag(diag::err_drv_invalid_value)
+ << MSCompatibilityVersion->getAsString(Args)
+ << MSCompatibilityVersion->getValue();
+ } else {
+ return MSVT;
+ }
+ }
+
+ if (MSCVersion) {
+ unsigned Version = 0;
+ if (StringRef(MSCVersion->getValue()).getAsInteger(10, Version)) {
+ if (D)
+ D->Diag(diag::err_drv_invalid_value)
+ << MSCVersion->getAsString(Args) << MSCVersion->getValue();
+ } else {
+ return separateMSVCFullVersion(Version);
+ }
+ }
+
+ return VersionTuple();
+}
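Putting the helper together (the flag spellings are real driver options; the values are illustrative):

    // -fmsc-version=1800               -> 18.0 (via separateMSVCFullVersion)
    // -fms-compatibility-version=18.00 -> 18.0 (parsed by VersionTuple)
    // both flags at once               -> diagnosed; an empty tuple returned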
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
index 1b02f467c141..968b0cb4724a 100644
--- a/lib/Driver/ToolChains.cpp
+++ b/lib/Driver/ToolChains.cpp
@@ -14,6 +14,7 @@
#include "clang/Basic/VirtualFileSystem.h"
#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
@@ -52,9 +53,10 @@ MachO::MachO(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
/// Darwin - Darwin tool chain for i386 and x86_64.
Darwin::Darwin(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
- : MachO(D, Triple, Args), TargetInitialized(false) {}
+ : MachO(D, Triple, Args), TargetInitialized(false),
+ CudaInstallation(D, Triple, Args) {}
-types::ID MachO::LookupTypeForExtension(const char *Ext) const {
+types::ID MachO::LookupTypeForExtension(StringRef Ext) const {
types::ID Ty = types::lookupTypeForExtension(Ext);
// Darwin always preprocesses assembly files (unless -x is used explicitly).
@@ -99,6 +101,11 @@ bool Darwin::hasBlocksRuntime() const {
}
}
+void Darwin::AddCudaIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
+}
+
// This is just a MachO name translation routine and there's no
// way to join this into ARMTargetParser without breaking all
// other assumptions. Maybe MachO should consider standardising
@@ -176,13 +183,6 @@ Darwin::~Darwin() {}
MachO::~MachO() {}
-std::string MachO::ComputeEffectiveClangTriple(const ArgList &Args,
- types::ID InputType) const {
- llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
-
- return Triple.getTriple();
-}
-
std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
types::ID InputType) const {
llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
@@ -296,6 +296,14 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(P));
}
+unsigned DarwinClang::GetDefaultDwarfVersion() const {
+ // Default to use DWARF 2 on OS X 10.10 / iOS 8 and lower.
+ if ((isTargetMacOS() && isMacosxVersionLT(10, 11)) ||
+ (isTargetIOSBased() && isIPhoneOSVersionLT(9)))
+ return 2;
+ return 4;
+}
+
void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
StringRef DarwinLibName, bool AlwaysLink,
bool IsEmbedded, bool AddRPath) const {
@@ -400,17 +408,22 @@ void DarwinClang::AddLinkSanitizerLibArgs(const ArgList &Args,
/*AddRPath*/ true);
}
+ToolChain::RuntimeLibType DarwinClang::GetRuntimeLibType(
+ const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "compiler-rt")
+ getDriver().Diag(diag::err_drv_unsupported_rtlib_for_platform)
+ << Value << "darwin";
+ }
+
+ return ToolChain::RLT_CompilerRT;
+}
+
void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- // Darwin only supports the compiler-rt based runtime libraries.
- switch (GetRuntimeLibType(Args)) {
- case ToolChain::RLT_CompilerRT:
- break;
- default:
- getDriver().Diag(diag::err_drv_unsupported_rtlib_for_platform)
- << Args.getLastArg(options::OPT_rtlib_EQ)->getValue() << "darwin";
- return;
- }
+ // Call once to ensure the diagnostic is printed if a wrong value was specified.
+ GetRuntimeLibType(Args);
// Darwin doesn't support real static executables, don't link any runtime
// libraries with -static.
@@ -803,7 +816,8 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
}
DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
- const char *BoundArch) const {
+ StringRef BoundArch,
+ Action::OffloadKind) const {
DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
const OptTable &Opts = getDriver().getOpts();
@@ -821,7 +835,7 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
llvm::Triple::ArchType XarchArch =
tools::darwin::getArchTypeForMachOArchName(A->getValue(0));
if (!(XarchArch == getArch() ||
- (BoundArch &&
+ (!BoundArch.empty() &&
XarchArch ==
tools::darwin::getArchTypeForMachOArchName(BoundArch))))
continue;
@@ -937,7 +951,7 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
// Add the arch options based on the particular spelling of -arch, to match
// how the driver driver works.
- if (BoundArch) {
+ if (!BoundArch.empty()) {
StringRef Name = BoundArch;
const Option MCpu = Opts.getOption(options::OPT_mcpu_EQ);
const Option MArch = Opts.getOption(options::OPT_march_EQ);
@@ -1032,14 +1046,16 @@ void MachO::AddLinkRuntimeLibArgs(const ArgList &Args,
AddLinkRuntimeLib(Args, CmdArgs, CompilerRT, false, true);
}
-DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
- const char *BoundArch) const {
+DerivedArgList *
+Darwin::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
// First get the generic Apple args, before moving onto Darwin-specific ones.
- DerivedArgList *DAL = MachO::TranslateArgs(Args, BoundArch);
+ DerivedArgList *DAL =
+ MachO::TranslateArgs(Args, BoundArch, DeviceOffloadKind);
const OptTable &Opts = getDriver().getOpts();
// If no architecture is bound, none of the translations here are relevant.
- if (!BoundArch)
+ if (BoundArch.empty())
return DAL;
// Add an explicit version min argument for the deployment target. We do this
@@ -1087,6 +1103,18 @@ DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
}
}
+ auto Arch = tools::darwin::getArchTypeForMachOArchName(BoundArch);
+ if ((Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)) {
+ if (Args.hasFlag(options::OPT_fomit_frame_pointer,
+ options::OPT_fno_omit_frame_pointer, false))
+ getDriver().Diag(clang::diag::warn_drv_unsupported_opt_for_target)
+ << "-fomit-frame-pointer" << BoundArch;
+ if (Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
+ options::OPT_mno_omit_leaf_frame_pointer, false))
+ getDriver().Diag(clang::diag::warn_drv_unsupported_opt_for_target)
+ << "-momit-leaf-frame-pointer" << BoundArch;
+ }
+
return DAL;
}
@@ -1275,6 +1303,10 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
return Res;
}
+void Darwin::printVerboseInfo(raw_ostream &OS) const {
+ CudaInstallation.print(OS);
+}
+
/// Generic_GCC - A tool chain using the 'gcc' command to perform
/// all subcommands; this relies on gcc translating the majority of
/// command line options.
@@ -1420,6 +1452,25 @@ void Generic_GCC::GCCInstallationDetector::init(
}
}
+ // Try to respect gcc-config on Gentoo. However, do that only
+ // if --gcc-toolchain is not provided or equal to the Gentoo install
+ // in /usr. This avoids accidentally enforcing the system GCC version
+ // when using a custom toolchain.
+ if (GCCToolchainDir == "" || GCCToolchainDir == D.SysRoot + "/usr") {
+ for (StringRef CandidateTriple : ExtraTripleAliases) {
+ if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple))
+ return;
+ }
+ for (StringRef CandidateTriple : CandidateTripleAliases) {
+ if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple))
+ return;
+ }
+ for (StringRef CandidateTriple : CandidateBiarchTripleAliases) {
+ if (ScanGentooGccConfig(TargetTriple, Args, CandidateTriple, true))
+ return;
+ }
+ }
+
// Loop over the various components which exist and select the best GCC
// installation available. GCC installs are ranked by version number.
Version = GCCVersion::Parse("0.0.0");
@@ -1518,8 +1569,8 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
"mips-mti-linux-gnu",
"mips-img-linux-gnu"};
static const char *const MIPSELLibDirs[] = {"/lib"};
- static const char *const MIPSELTriples[] = {
- "mipsel-linux-gnu", "mipsel-linux-android", "mips-img-linux-gnu"};
+ static const char *const MIPSELTriples[] = {"mipsel-linux-gnu",
+ "mips-img-linux-gnu"};
static const char *const MIPS64LibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64Triples[] = {
@@ -1528,7 +1579,15 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
static const char *const MIPS64ELLibDirs[] = {"/lib64", "/lib"};
static const char *const MIPS64ELTriples[] = {
"mips64el-linux-gnu", "mips-mti-linux-gnu", "mips-img-linux-gnu",
- "mips64el-linux-android", "mips64el-linux-gnuabi64"};
+ "mips64el-linux-gnuabi64"};
+
+ static const char *const MIPSELAndroidLibDirs[] = {"/lib", "/libr2",
+ "/libr6"};
+ static const char *const MIPSELAndroidTriples[] = {"mipsel-linux-android"};
+ static const char *const MIPS64ELAndroidLibDirs[] = {"/lib64", "/lib",
+ "/libr2", "/libr6"};
+ static const char *const MIPS64ELAndroidTriples[] = {
+ "mips64el-linux-android"};
static const char *const PPCLibDirs[] = {"/lib32", "/lib"};
static const char *const PPCTriples[] = {
@@ -1630,11 +1689,22 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
BiarchTripleAliases.append(begin(MIPS64Triples), end(MIPS64Triples));
break;
case llvm::Triple::mipsel:
- LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- TripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
- TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
- BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ if (TargetTriple.isAndroid()) {
+ LibDirs.append(begin(MIPSELAndroidLibDirs), end(MIPSELAndroidLibDirs));
+ TripleAliases.append(begin(MIPSELAndroidTriples),
+ end(MIPSELAndroidTriples));
+ BiarchLibDirs.append(begin(MIPS64ELAndroidLibDirs),
+ end(MIPS64ELAndroidLibDirs));
+ BiarchTripleAliases.append(begin(MIPS64ELAndroidTriples),
+ end(MIPS64ELAndroidTriples));
+ } else {
+ LibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ TripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ BiarchLibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ BiarchTripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ }
break;
case llvm::Triple::mips64:
LibDirs.append(begin(MIPS64LibDirs), end(MIPS64LibDirs));
@@ -1643,11 +1713,23 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
break;
case llvm::Triple::mips64el:
- LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
- TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
- BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
- BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
- BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ if (TargetTriple.isAndroid()) {
+ LibDirs.append(begin(MIPS64ELAndroidLibDirs),
+ end(MIPS64ELAndroidLibDirs));
+ TripleAliases.append(begin(MIPS64ELAndroidTriples),
+ end(MIPS64ELAndroidTriples));
+ BiarchLibDirs.append(begin(MIPSELAndroidLibDirs),
+ end(MIPSELAndroidLibDirs));
+ BiarchTripleAliases.append(begin(MIPSELAndroidTriples),
+ end(MIPSELAndroidTriples));
+ } else {
+ LibDirs.append(begin(MIPS64ELLibDirs), end(MIPS64ELLibDirs));
+ TripleAliases.append(begin(MIPS64ELTriples), end(MIPS64ELTriples));
+ BiarchLibDirs.append(begin(MIPSELLibDirs), end(MIPSELLibDirs));
+ BiarchTripleAliases.append(begin(MIPSELTriples), end(MIPSELTriples));
+ BiarchTripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
+ }
break;
case llvm::Triple::ppc:
LibDirs.append(begin(PPCLibDirs), end(PPCLibDirs));
@@ -1706,8 +1788,8 @@ static CudaVersion ParseCudaVersionFile(llvm::StringRef V) {
int Major = -1, Minor = -1;
auto First = V.split('.');
auto Second = First.second.split('.');
- if (!First.first.getAsInteger(10, Major) ||
- !Second.first.getAsInteger(10, Minor))
+ if (First.first.getAsInteger(10, Major) ||
+ Second.first.getAsInteger(10, Minor))
return CudaVersion::UNKNOWN;
if (Major == 7 && Minor == 0) {
@@ -1722,10 +1804,10 @@ static CudaVersion ParseCudaVersionFile(llvm::StringRef V) {
return CudaVersion::UNKNOWN;
}
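// Minimal demonstration of the fix above, not part of this patch:
// llvm::StringRef::getAsInteger returns true on *failure*, so the old
// negated condition rejected every version string it successfully parsed.
#include "llvm/ADT/StringRef.h"
#include <cassert>

int main() {
  int Major = -1;
  bool Failed = llvm::StringRef("7").getAsInteger(10, Major);
  assert(!Failed && Major == 7);                        // success -> false
  assert(llvm::StringRef("x").getAsInteger(10, Major)); // failure -> true
  return 0;
}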
-// \brief -- try common CUDA installation paths looking for files we need for
-// CUDA compilation.
-void Generic_GCC::CudaInstallationDetector::init(
- const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args) {
+CudaInstallationDetector::CudaInstallationDetector(
+ const Driver &D, const llvm::Triple &TargetTriple,
+ const llvm::opt::ArgList &Args)
+ : D(D) {
SmallVector<std::string, 4> CudaPathCandidates;
if (Args.hasArg(options::OPT_cuda_path_EQ))
@@ -1733,8 +1815,7 @@ void Generic_GCC::CudaInstallationDetector::init(
Args.getLastArgValue(options::OPT_cuda_path_EQ));
else {
CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda");
- // FIXME: Uncomment this once we can compile the cuda 8 headers.
- // CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-8.0");
+ CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-8.0");
CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-7.5");
CudaPathCandidates.push_back(D.SysRoot + "/usr/local/cuda-7.0");
}
@@ -1747,13 +1828,35 @@ void Generic_GCC::CudaInstallationDetector::init(
BinPath = CudaPath + "/bin";
IncludePath = InstallPath + "/include";
LibDevicePath = InstallPath + "/nvvm/libdevice";
- LibPath = InstallPath + (TargetTriple.isArch64Bit() ? "/lib64" : "/lib");
auto &FS = D.getVFS();
- if (!(FS.exists(IncludePath) && FS.exists(BinPath) && FS.exists(LibPath) &&
+ if (!(FS.exists(IncludePath) && FS.exists(BinPath) &&
FS.exists(LibDevicePath)))
continue;
+ // On Linux, we have both lib and lib64 directories, and we need to choose
+ // based on our triple. On MacOS, we have only a lib directory.
+ //
+ // It's sufficient for our purposes to be flexible: If both lib and lib64
+ // exist, we choose whichever one matches our triple. Otherwise, if only
+ // lib exists, we use it.
+ if (TargetTriple.isArch64Bit() && FS.exists(InstallPath + "/lib64"))
+ LibPath = InstallPath + "/lib64";
+ else if (FS.exists(InstallPath + "/lib"))
+ LibPath = InstallPath + "/lib";
+ else
+ continue;
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
+ FS.getBufferForFile(InstallPath + "/version.txt");
+ if (!VersionFile) {
+ // CUDA 7.0 doesn't have a version.txt, so guess that's our version if
+ // version.txt isn't present.
+ Version = CudaVersion::CUDA_70;
+ } else {
+ Version = ParseCudaVersionFile((*VersionFile)->getBuffer());
+ }
+
std::error_code EC;
for (llvm::sys::fs::directory_iterator LI(LibDevicePath, EC), LE;
!EC && LI != LE; LI = LI.increment(EC)) {
@@ -1766,42 +1869,67 @@ void Generic_GCC::CudaInstallationDetector::init(
StringRef GpuArch = FileName.slice(
LibDeviceName.size(), FileName.find('.', LibDeviceName.size()));
LibDeviceMap[GpuArch] = FilePath.str();
- // Insert map entries for specifc devices with this compute capability.
+ // Insert map entries for specific devices with this compute
+ // capability. NVCC's choice of the libdevice library version is
+ // rather peculiar and depends on the CUDA version.
if (GpuArch == "compute_20") {
LibDeviceMap["sm_20"] = FilePath;
LibDeviceMap["sm_21"] = FilePath;
+ LibDeviceMap["sm_32"] = FilePath;
} else if (GpuArch == "compute_30") {
LibDeviceMap["sm_30"] = FilePath;
- LibDeviceMap["sm_32"] = FilePath;
+ if (Version < CudaVersion::CUDA_80) {
+ LibDeviceMap["sm_50"] = FilePath;
+ LibDeviceMap["sm_52"] = FilePath;
+ LibDeviceMap["sm_53"] = FilePath;
+ }
+ LibDeviceMap["sm_60"] = FilePath;
+ LibDeviceMap["sm_61"] = FilePath;
+ LibDeviceMap["sm_62"] = FilePath;
} else if (GpuArch == "compute_35") {
LibDeviceMap["sm_35"] = FilePath;
LibDeviceMap["sm_37"] = FilePath;
} else if (GpuArch == "compute_50") {
- LibDeviceMap["sm_50"] = FilePath;
- LibDeviceMap["sm_52"] = FilePath;
- LibDeviceMap["sm_53"] = FilePath;
- LibDeviceMap["sm_60"] = FilePath;
- LibDeviceMap["sm_61"] = FilePath;
- LibDeviceMap["sm_62"] = FilePath;
+ if (Version >= CudaVersion::CUDA_80) {
+ LibDeviceMap["sm_50"] = FilePath;
+ LibDeviceMap["sm_52"] = FilePath;
+ LibDeviceMap["sm_53"] = FilePath;
+ }
}
}
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> VersionFile =
- FS.getBufferForFile(InstallPath + "/version.txt");
- if (!VersionFile) {
- // CUDA 7.0 doesn't have a version.txt, so guess that's our version if
- // version.txt isn't present.
- Version = CudaVersion::CUDA_70;
- } else {
- Version = ParseCudaVersionFile((*VersionFile)->getBuffer());
- }
-
IsValid = true;
break;
}
}
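// Minimal sketch of the version-dependent remapping above, not the patch's
// code: with CUDA 8.0+, sm_50/sm_52/sm_53 pair with the compute_50
// libdevice, while older installs fall back to compute_30, matching NVCC's
// pairing; sm_32 now maps to compute_20 and sm_60/sm_61/sm_62 to
// compute_30.
#include <cassert>
#include <map>
#include <string>

int main() {
  bool IsCuda8 = true; // pretend version.txt reported 8.0
  std::map<std::string, std::string> LibDeviceMap;
  LibDeviceMap["sm_50"] = IsCuda8 ? "compute_50" : "compute_30";
  assert(LibDeviceMap["sm_50"] == "compute_50");
  return 0;
}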
-void Generic_GCC::CudaInstallationDetector::CheckCudaVersionSupportsArch(
+void CudaInstallationDetector::AddCudaIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ // Add cuda_wrappers/* to our system include path. This lets us wrap
+ // standard library headers.
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ llvm::sys::path::append(P, "cuda_wrappers");
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(P));
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nocudainc))
+ return;
+
+ if (!isValid()) {
+ D.Diag(diag::err_drv_no_cuda_installation);
+ return;
+ }
+
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(getIncludePath()));
+ CC1Args.push_back("-include");
+ CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
+}
+
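// Illustrative only: for a hypothetical resource dir /usr/lib/clang/4.0.0
// and a CUDA install at /usr/local/cuda, the method above contributes
// roughly these cc1 flags (unless -nobuiltininc / -nocudainc suppress the
// respective parts):
//   -internal-isystem /usr/lib/clang/4.0.0/include/cuda_wrappers
//   -internal-isystem /usr/local/cuda/include
//   -include __clang_cuda_runtime_wrapper.h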
+void CudaInstallationDetector::CheckCudaVersionSupportsArch(
CudaArch Arch) const {
if (Arch == CudaArch::UNKNOWN || Version == CudaVersion::UNKNOWN ||
ArchsWithVersionTooLowErrors.count(Arch) > 0)
@@ -1816,7 +1944,7 @@ void Generic_GCC::CudaInstallationDetector::CheckCudaVersionSupportsArch(
}
}
-void Generic_GCC::CudaInstallationDetector::print(raw_ostream &OS) const {
+void CudaInstallationDetector::print(raw_ostream &OS) const {
if (isValid())
OS << "Found CUDA installation: " << InstallPath << ", version "
<< CudaVersionToString(Version) << "\n";
@@ -1985,7 +2113,8 @@ static bool findMipsCsMultilibs(const Multilib::flags_list &Flags,
return false;
}
-static bool findMipsAndroidMultilibs(const Multilib::flags_list &Flags,
+static bool findMipsAndroidMultilibs(vfs::FileSystem &VFS, StringRef Path,
+ const Multilib::flags_list &Flags,
FilterNonExistent &NonExistent,
DetectedMultilibs &Result) {
@@ -1995,8 +2124,29 @@ static bool findMipsAndroidMultilibs(const Multilib::flags_list &Flags,
.Maybe(Multilib("/mips-r6").flag("+march=mips32r6"))
.FilterOut(NonExistent);
- if (AndroidMipsMultilibs.select(Flags, Result.SelectedMultilib)) {
- Result.Multilibs = AndroidMipsMultilibs;
+ MultilibSet AndroidMipselMultilibs =
+ MultilibSet()
+ .Either(Multilib().flag("+march=mips32"),
+ Multilib("/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
+ Multilib("/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
+ .FilterOut(NonExistent);
+
+ MultilibSet AndroidMips64elMultilibs =
+ MultilibSet()
+ .Either(
+ Multilib().flag("+march=mips64r6"),
+ Multilib("/32/mips-r1", "", "/mips-r1").flag("+march=mips32"),
+ Multilib("/32/mips-r2", "", "/mips-r2").flag("+march=mips32r2"),
+ Multilib("/32/mips-r6", "", "/mips-r6").flag("+march=mips32r6"))
+ .FilterOut(NonExistent);
+
+ MultilibSet *MS = &AndroidMipsMultilibs;
+ if (VFS.exists(Path + "/mips-r6"))
+ MS = &AndroidMipselMultilibs;
+ else if (VFS.exists(Path + "/32"))
+ MS = &AndroidMips64elMultilibs;
+ if (MS->select(Flags, Result.SelectedMultilib)) {
+ Result.Multilibs = *MS;
return true;
}
return false;
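// Summary of the selection above, for orientation only: a "/mips-r6"
// subdirectory under the GCC path selects the new mipsel layout, a "/32"
// subdirectory selects the mips64el layout, and otherwise the original
// AndroidMipsMultilibs set is kept.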
@@ -2323,6 +2473,7 @@ static bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
addMultilibFlag(CPUName == "mips64r2" || CPUName == "mips64r3" ||
CPUName == "mips64r5" || CPUName == "octeon",
"march=mips64r2", Flags);
+ addMultilibFlag(CPUName == "mips64r6", "march=mips64r6", Flags);
addMultilibFlag(isMicroMips(Args), "mmicromips", Flags);
addMultilibFlag(tools::mips::isUCLibc(Args), "muclibc", Flags);
addMultilibFlag(tools::mips::isNaN2008(Args, TargetTriple), "mnan=2008",
@@ -2335,7 +2486,8 @@ static bool findMIPSMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
addMultilibFlag(!isMipsEL(TargetArch), "EB", Flags);
if (TargetTriple.isAndroid())
- return findMipsAndroidMultilibs(Flags, NonExistent, Result);
+ return findMipsAndroidMultilibs(D.getVFS(), Path, Flags, NonExistent,
+ Result);
if (TargetTriple.getVendor() == llvm::Triple::MipsTechnologies &&
TargetTriple.getOS() == llvm::Triple::Linux &&
@@ -2546,6 +2698,33 @@ void Generic_GCC::GCCInstallationDetector::scanLibDirForGCCTripleSolaris(
}
}
+bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
+ const llvm::Triple &TargetTriple, const ArgList &Args,
+ StringRef Path, bool NeedsBiarchSuffix) {
+ llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
+ DetectedMultilibs Detected;
+
+ // An Android standalone toolchain could have multilibs for ARM and Thumb.
+ // Debian mips multilibs behave more like the rest of the biarch ones, so
+ // handle them there.
+ if (isArmOrThumbArch(TargetArch) && TargetTriple.isAndroid()) {
+ // It should also work without multilibs in a simplified toolchain.
+ findAndroidArmMultilibs(D, TargetTriple, Path, Args, Detected);
+ } else if (isMipsArch(TargetArch)) {
+ if (!findMIPSMultilibs(D, TargetTriple, Path, Args, Detected))
+ return false;
+ } else if (!findBiarchMultilibs(D, TargetTriple, Path, Args,
+ NeedsBiarchSuffix, Detected)) {
+ return false;
+ }
+
+ Multilibs = Detected.Multilibs;
+ SelectedMultilib = Detected.SelectedMultilib;
+ BiarchSibling = Detected.BiarchSibling;
+
+ return true;
+}
+
void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const llvm::Triple &TargetTriple, const ArgList &Args,
const std::string &LibDir, StringRef CandidateTriple,
@@ -2601,25 +2780,10 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
if (CandidateVersion <= Version)
continue;
- DetectedMultilibs Detected;
-
- // Android standalone toolchain could have multilibs for ARM and Thumb.
- // Debian mips multilibs behave more like the rest of the biarch ones,
- // so handle them there
- if (isArmOrThumbArch(TargetArch) && TargetTriple.isAndroid()) {
- // It should also work without multilibs in a simplified toolchain.
- findAndroidArmMultilibs(D, TargetTriple, LI->getName(), Args, Detected);
- } else if (isMipsArch(TargetArch)) {
- if (!findMIPSMultilibs(D, TargetTriple, LI->getName(), Args, Detected))
- continue;
- } else if (!findBiarchMultilibs(D, TargetTriple, LI->getName(), Args,
- NeedsBiarchSuffix, Detected)) {
+ if (!ScanGCCForMultilibs(TargetTriple, Args, LI->getName(),
+ NeedsBiarchSuffix))
continue;
- }
- Multilibs = Detected.Multilibs;
- SelectedMultilib = Detected.SelectedMultilib;
- BiarchSibling = Detected.BiarchSibling;
Version = CandidateVersion;
GCCTriple.setTriple(CandidateTriple);
// FIXME: We hack together the directory name here instead of
@@ -2633,9 +2797,49 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
}
}
+bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
+ const llvm::Triple &TargetTriple, const ArgList &Args,
+ StringRef CandidateTriple, bool NeedsBiarchSuffix) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
+ D.getVFS().getBufferForFile(D.SysRoot + "/etc/env.d/gcc/config-" +
+ CandidateTriple.str());
+ if (File) {
+ SmallVector<StringRef, 2> Lines;
+ File.get()->getBuffer().split(Lines, "\n");
+ for (StringRef Line : Lines) {
+ // CURRENT=triple-version
+ if (Line.consume_front("CURRENT=")) {
+ const std::pair<StringRef, StringRef> ActiveVersion =
+ Line.rsplit('-');
+ // Note: Strictly speaking, we should be reading
+ // /etc/env.d/gcc/${CURRENT} now. However, the file doesn't
+ // contain anything new or especially useful to us.
+ const std::string GentooPath = D.SysRoot + "/usr/lib/gcc/" +
+ ActiveVersion.first.str() + "/" +
+ ActiveVersion.second.str();
+ if (D.getVFS().exists(GentooPath + "/crtbegin.o")) {
+ if (!ScanGCCForMultilibs(TargetTriple, Args, GentooPath,
+ NeedsBiarchSuffix))
+ return false;
+
+ Version = GCCVersion::Parse(ActiveVersion.second);
+ GCCInstallPath = GentooPath;
+ GCCParentLibPath = GentooPath + "/../../..";
+ GCCTriple.setTriple(ActiveVersion.first);
+ IsValid = true;
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
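// A hypothetical Gentoo file the scanner above reads, e.g.
// /etc/env.d/gcc/config-x86_64-pc-linux-gnu containing:
//   CURRENT=x86_64-pc-linux-gnu-5.4.0
// rsplit('-') divides the value at the last dash into triple and version
// (sketch, not part of this patch):
#include "llvm/ADT/StringRef.h"
#include <cassert>

int main() {
  llvm::StringRef Value("x86_64-pc-linux-gnu-5.4.0");
  auto ActiveVersion = Value.rsplit('-');
  assert(ActiveVersion.first == "x86_64-pc-linux-gnu");
  assert(ActiveVersion.second == "5.4.0");
  return 0;
}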
Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : ToolChain(D, Triple, Args), GCCInstallation(D), CudaInstallation(D) {
+ : ToolChain(D, Triple, Args), GCCInstallation(D),
+ CudaInstallation(D, Triple, Args) {
getProgramPaths().push_back(getDriver().getInstalledDir());
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
@@ -2675,7 +2879,15 @@ bool Generic_GCC::IsUnwindTablesDefault() const {
}
bool Generic_GCC::isPICDefault() const {
- return getArch() == llvm::Triple::x86_64 && getTriple().isOSWindows();
+ switch (getArch()) {
+ case llvm::Triple::x86_64:
+ return getTriple().isOSWindows();
+ case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
+ return !getTriple().isOSBinFormatMachO() && !getTriple().isMacOSX();
+ default:
+ return false;
+ }
}
bool Generic_GCC::isPIEDefault() const { return false; }
@@ -2703,11 +2915,50 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::mips:
case llvm::Triple::mipsel:
return true;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ // Enabled for Debian mips64/mips64el only. Other targets are unable to
+ // distinguish N32 from N64.
+ if (getTriple().getEnvironment() == llvm::Triple::GNUABI64)
+ return true;
+ return false;
default:
return false;
}
}
+void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx: {
+ std::string Path = findLibCxxIncludePath();
+ if (!Path.empty())
+ addSystemInclude(DriverArgs, CC1Args, Path);
+ break;
+ }
+
+ case ToolChain::CST_Libstdcxx:
+ addLibStdCxxIncludePaths(DriverArgs, CC1Args);
+ break;
+ }
+}
+
+std::string Generic_GCC::findLibCxxIncludePath() const {
+ // FIXME: The Linux behavior would probably be a better approach here.
+ return getDriver().SysRoot + "/usr/include/c++/v1";
+}
+
+void
+Generic_GCC::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ // By default, we don't assume we know where libstdc++ might be installed.
+ // FIXME: If we have a valid GCCInstallation, use it.
+}
+
/// \brief Helper to add the variant paths of a libstdc++ installation.
bool Generic_GCC::addLibStdCXXIncludePaths(
Twine Base, Twine Suffix, StringRef GCCTriple, StringRef GCCMultiarchTriple,
@@ -2741,6 +2992,49 @@ bool Generic_GCC::addLibStdCXXIncludePaths(
return true;
}
+llvm::opt::DerivedArgList *
+Generic_GCC::TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef,
+ Action::OffloadKind DeviceOffloadKind) const {
+
+ // If this tool chain is used for an OpenMP offloading device, we have to
+ // make sure we always generate a shared library regardless of the commands
+ // the user passed to the host, because the runtime library needs to load
+ // the device image dynamically at run time.
+ if (DeviceOffloadKind == Action::OFK_OpenMP) {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ const OptTable &Opts = getDriver().getOpts();
+
+ // Request the shared library. Given that these options are decided
+ // implicitly, they do not refer to any base argument.
+ DAL->AddFlagArg(/*BaseArg=*/nullptr, Opts.getOption(options::OPT_shared));
+ DAL->AddFlagArg(/*BaseArg=*/nullptr, Opts.getOption(options::OPT_fPIC));
+
+ // Filter out the arguments we don't want to pass to the offloading
+ // toolchain, as they can interfere with the creation of a shared library.
+ for (auto *A : Args) {
+ switch ((options::ID)A->getOption().getID()) {
+ default:
+ DAL->append(A);
+ break;
+ case options::OPT_shared:
+ case options::OPT_dynamic:
+ case options::OPT_static:
+ case options::OPT_fPIC:
+ case options::OPT_fno_PIC:
+ case options::OPT_fpic:
+ case options::OPT_fno_pic:
+ case options::OPT_fPIE:
+ case options::OPT_fno_PIE:
+ case options::OPT_fpie:
+ case options::OPT_fno_pie:
+ break;
+ }
+ }
+ return DAL;
+ }
+ return nullptr;
+}
+
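// A minimal sketch of the filtering rule above (hypothetical helper, not
// driver code): PIC/PIE and linking-mode flags from the host command line
// never reach the offloading device link, and -shared -fPIC are always
// forced.
#include <cassert>
#include <set>
#include <string>
#include <vector>

static std::vector<std::string>
translateForOpenMPDevice(const std::vector<std::string> &HostArgs) {
  static const std::set<std::string> Dropped = {
      "-shared", "-dynamic", "-static", "-fPIC", "-fno-PIC", "-fpic",
      "-fno-pic", "-fPIE", "-fno-PIE", "-fpie", "-fno-pie"};
  std::vector<std::string> Out = {"-shared", "-fPIC"};
  for (const std::string &A : HostArgs)
    if (!Dropped.count(A))
      Out.push_back(A);
  return Out;
}

int main() {
  auto Out = translateForOpenMPDevice({"-static", "-O2"});
  assert(Out == (std::vector<std::string>{"-shared", "-fPIC", "-O2"}));
  return 0;
}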
void Generic_ELF::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
const Generic_GCC::GCCVersion &V = GCCInstallation.getVersion();
@@ -2773,9 +3067,6 @@ MipsLLVMToolChain::MipsLLVMToolChain(const Driver &D,
LibSuffix = tools::mips::getMipsABILibSuffix(Args, Triple);
getFilePaths().clear();
getFilePaths().push_back(computeSysRoot() + "/usr/lib" + LibSuffix);
-
- // Use LLD by default.
- DefaultLinker = "lld";
}
void MipsLLVMToolChain::AddClangSystemIncludeArgs(
@@ -2832,25 +3123,16 @@ MipsLLVMToolChain::GetCXXStdlibType(const ArgList &Args) const {
return ToolChain::CST_Libcxx;
}
-void MipsLLVMToolChain::AddClangCXXStdlibIncludeArgs(
- const ArgList &DriverArgs, ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
- assert((GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) &&
- "Only -lc++ (aka libcxx) is suported in this toolchain.");
-
- const auto &Callback = Multilibs.includeDirsCallback();
- if (Callback) {
+std::string MipsLLVMToolChain::findLibCxxIncludePath() const {
+ if (const auto &Callback = Multilibs.includeDirsCallback()) {
for (std::string Path : Callback(SelectedMultilib)) {
Path = getDriver().getInstalledDir() + Path + "/c++/v1";
if (llvm::sys::fs::exists(Path)) {
- addSystemInclude(DriverArgs, CC1Args, Path);
- break;
+ return Path;
}
}
}
+ return "";
}
void MipsLLVMToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
@@ -2890,7 +3172,7 @@ std::string HexagonToolChain::getHexagonTargetDir(
if (getVFS().exists(InstallRelDir = InstalledDir + "/../target"))
return InstallRelDir;
- return InstallRelDir;
+ return InstalledDir;
}
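// The one-line fix above addresses a fallthrough bug: when the "../target"
// probe fails, the function previously returned the relative path it had
// just failed to find (InstallRelDir) instead of the installed directory
// itself.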
Optional<unsigned> HexagonToolChain::getSmallDataThreshold(
@@ -2997,15 +3279,14 @@ void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include");
}
-void HexagonToolChain::AddClangCXXStdlibIncludeArgs(
- const ArgList &DriverArgs, ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
+void HexagonToolChain::addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
const Driver &D = getDriver();
std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
- addSystemInclude(DriverArgs, CC1Args, TargetDir + "/hexagon/include/c++");
+ addLibStdCXXIncludePaths(TargetDir, "/hexagon/include/c++", "", "", "", "",
+ DriverArgs, CC1Args);
}
ToolChain::CXXStdlibType
@@ -3163,37 +3444,25 @@ void NaClToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
CmdArgs.push_back("-lc++");
}
-void NaClToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
+std::string NaClToolChain::findLibCxxIncludePath() const {
const Driver &D = getDriver();
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
- // Check for -stdlib= flags. We only support libc++ but this consumes the arg
- // if the value is libc++, and emits an error for other values.
- GetCXXStdlibType(DriverArgs);
SmallString<128> P(D.Dir + "/../");
switch (getTriple().getArch()) {
case llvm::Triple::arm:
llvm::sys::path::append(P, "arm-nacl/include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- break;
+ return P.str();
case llvm::Triple::x86:
llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- break;
+ return P.str();
case llvm::Triple::x86_64:
llvm::sys::path::append(P, "x86_64-nacl/include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- break;
+ return P.str();
case llvm::Triple::mipsel:
llvm::sys::path::append(P, "mipsel-nacl/include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P.str());
- break;
+ return P.str();
default:
- break;
+ return "";
}
}
@@ -3254,6 +3523,13 @@ bool TCEToolChain::isPIEDefault() const { return false; }
bool TCEToolChain::isPICDefaultForced() const { return false; }
+TCELEToolChain::TCELEToolChain(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : TCEToolChain(D, Triple, Args) {
+}
+
+TCELEToolChain::~TCELEToolChain() {}
+
// CloudABI - CloudABI tool chain which can call ld(1) directly.
CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple,
@@ -3264,15 +3540,10 @@ CloudABI::CloudABI(const Driver &D, const llvm::Triple &Triple,
getFilePaths().push_back(P.str());
}
-void CloudABI::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) &&
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
+std::string CloudABI::findLibCxxIncludePath() const {
SmallString<128> P(getDriver().Dir);
llvm::sys::path::append(P, "..", getTriple().str(), "include/c++/v1");
- addSystemInclude(DriverArgs, CC1Args, P.str());
+ return P.str();
}
void CloudABI::AddCXXStdlibLibArgs(const ArgList &Args,
@@ -3316,29 +3587,14 @@ Haiku::Haiku(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
}
-void Haiku::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
- switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/system/develop/headers/c++/v1");
- break;
- case ToolChain::CST_Libstdcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/system/develop/headers/c++");
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/system/develop/headers/c++/backward");
+std::string Haiku::findLibCxxIncludePath() const {
+ return getDriver().SysRoot + "/system/develop/headers/c++/v1";
+}
- StringRef Triple = getTriple().str();
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/system/develop/headers/c++/" +
- Triple);
- break;
- }
+void Haiku::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addLibStdCXXIncludePaths(getDriver().SysRoot, "/system/develop/headers/c++",
+ getTriple().str(), "", "", "", DriverArgs, CC1Args);
}
/// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly.
@@ -3374,34 +3630,13 @@ ToolChain::CXXStdlibType Bitrig::GetDefaultCXXStdlibType() const {
return ToolChain::CST_Libcxx;
}
-void Bitrig::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
- switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/v1");
- break;
- case ToolChain::CST_Libstdcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/stdc++");
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/stdc++/backward");
-
- StringRef Triple = getTriple().str();
- if (Triple.startswith("amd64"))
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/stdc++/x86_64" +
- Triple.substr(5));
- else
- addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot +
- "/usr/include/c++/stdc++/" +
- Triple);
- break;
- }
+void Bitrig::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ std::string Triple = getTriple().str();
+ if (StringRef(Triple).startswith("amd64"))
+ Triple = "x86_64" + Triple.substr(5);
+ addLibStdCXXIncludePaths(getDriver().SysRoot, "/usr/include/c++/stdc++",
+ Triple, "", "", "", DriverArgs, CC1Args);
}
void Bitrig::AddCXXStdlibLibArgs(const ArgList &Args,
@@ -3440,24 +3675,11 @@ ToolChain::CXXStdlibType FreeBSD::GetDefaultCXXStdlibType() const {
return ToolChain::CST_Libstdcxx;
}
-void FreeBSD::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
- switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/v1");
- break;
- case ToolChain::CST_Libstdcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/4.2");
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/4.2/backward");
- break;
- }
+void FreeBSD::addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addLibStdCXXIncludePaths(getDriver().SysRoot, "/usr/include/c++/4.2", "", "",
+ "", "", DriverArgs, CC1Args);
}
void FreeBSD::AddCXXStdlibLibArgs(const ArgList &Args,
@@ -3602,24 +3824,14 @@ ToolChain::CXXStdlibType NetBSD::GetDefaultCXXStdlibType() const {
return ToolChain::CST_Libstdcxx;
}
-void NetBSD::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
+std::string NetBSD::findLibCxxIncludePath() const {
+ return getDriver().SysRoot + "/usr/include/c++/";
+}
- switch (GetCXXStdlibType(DriverArgs)) {
- case ToolChain::CST_Libcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/c++/");
- break;
- case ToolChain::CST_Libstdcxx:
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/g++");
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/usr/include/g++/backward");
- break;
- }
+void NetBSD::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ addLibStdCXXIncludePaths(getDriver().SysRoot, "/usr/include/g++", "", "", "",
+ "", DriverArgs, CC1Args);
}
/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
@@ -3692,6 +3904,9 @@ void Solaris::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
// Include the support directory for things like xlocale and fudged system
// headers.
+ // FIXME: This is a weird mix of libc++ and libstdc++. We should also be
+ // checking the value of -stdlib= here and adding the includes for libc++
+ // rather than libstdc++ if it's requested.
addSystemInclude(DriverArgs, CC1Args, "/usr/include/c++/v1/support/solaris");
if (GCCInstallation.isValid()) {
@@ -3709,137 +3924,6 @@ void Solaris::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
}
}
-/// Distribution (very bare-bones at the moment).
-
-enum Distro {
- // NB: Releases of a particular Linux distro should be kept together
- // in this enum, because some tests are done by integer comparison against
- // the first and last known member in the family, e.g. IsRedHat().
- ArchLinux,
- DebianLenny,
- DebianSqueeze,
- DebianWheezy,
- DebianJessie,
- DebianStretch,
- Exherbo,
- RHEL5,
- RHEL6,
- RHEL7,
- Fedora,
- OpenSUSE,
- UbuntuHardy,
- UbuntuIntrepid,
- UbuntuJaunty,
- UbuntuKarmic,
- UbuntuLucid,
- UbuntuMaverick,
- UbuntuNatty,
- UbuntuOneiric,
- UbuntuPrecise,
- UbuntuQuantal,
- UbuntuRaring,
- UbuntuSaucy,
- UbuntuTrusty,
- UbuntuUtopic,
- UbuntuVivid,
- UbuntuWily,
- UbuntuXenial,
- UnknownDistro
-};
-
-static bool IsRedhat(enum Distro Distro) {
- return Distro == Fedora || (Distro >= RHEL5 && Distro <= RHEL7);
-}
-
-static bool IsOpenSUSE(enum Distro Distro) { return Distro == OpenSUSE; }
-
-static bool IsDebian(enum Distro Distro) {
- return Distro >= DebianLenny && Distro <= DebianStretch;
-}
-
-static bool IsUbuntu(enum Distro Distro) {
- return Distro >= UbuntuHardy && Distro <= UbuntuXenial;
-}
-
-static Distro DetectDistro(const Driver &D, llvm::Triple::ArchType Arch) {
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
- llvm::MemoryBuffer::getFile("/etc/lsb-release");
- if (File) {
- StringRef Data = File.get()->getBuffer();
- SmallVector<StringRef, 16> Lines;
- Data.split(Lines, "\n");
- Distro Version = UnknownDistro;
- for (StringRef Line : Lines)
- if (Version == UnknownDistro && Line.startswith("DISTRIB_CODENAME="))
- Version = llvm::StringSwitch<Distro>(Line.substr(17))
- .Case("hardy", UbuntuHardy)
- .Case("intrepid", UbuntuIntrepid)
- .Case("jaunty", UbuntuJaunty)
- .Case("karmic", UbuntuKarmic)
- .Case("lucid", UbuntuLucid)
- .Case("maverick", UbuntuMaverick)
- .Case("natty", UbuntuNatty)
- .Case("oneiric", UbuntuOneiric)
- .Case("precise", UbuntuPrecise)
- .Case("quantal", UbuntuQuantal)
- .Case("raring", UbuntuRaring)
- .Case("saucy", UbuntuSaucy)
- .Case("trusty", UbuntuTrusty)
- .Case("utopic", UbuntuUtopic)
- .Case("vivid", UbuntuVivid)
- .Case("wily", UbuntuWily)
- .Case("xenial", UbuntuXenial)
- .Default(UnknownDistro);
- if (Version != UnknownDistro)
- return Version;
- }
-
- File = llvm::MemoryBuffer::getFile("/etc/redhat-release");
- if (File) {
- StringRef Data = File.get()->getBuffer();
- if (Data.startswith("Fedora release"))
- return Fedora;
- if (Data.startswith("Red Hat Enterprise Linux") ||
- Data.startswith("CentOS") ||
- Data.startswith("Scientific Linux")) {
- if (Data.find("release 7") != StringRef::npos)
- return RHEL7;
- else if (Data.find("release 6") != StringRef::npos)
- return RHEL6;
- else if (Data.find("release 5") != StringRef::npos)
- return RHEL5;
- }
- return UnknownDistro;
- }
-
- File = llvm::MemoryBuffer::getFile("/etc/debian_version");
- if (File) {
- StringRef Data = File.get()->getBuffer();
- if (Data[0] == '5')
- return DebianLenny;
- else if (Data.startswith("squeeze/sid") || Data[0] == '6')
- return DebianSqueeze;
- else if (Data.startswith("wheezy/sid") || Data[0] == '7')
- return DebianWheezy;
- else if (Data.startswith("jessie/sid") || Data[0] == '8')
- return DebianJessie;
- else if (Data.startswith("stretch/sid") || Data[0] == '9')
- return DebianStretch;
- return UnknownDistro;
- }
-
- if (D.getVFS().exists("/etc/SuSE-release"))
- return OpenSUSE;
-
- if (D.getVFS().exists("/etc/exherbo-release"))
- return Exherbo;
-
- if (D.getVFS().exists("/etc/arch-release"))
- return ArchLinux;
-
- return UnknownDistro;
-}
-
/// \brief Get our best guess at the multiarch triple for a target.
///
/// Debian-based systems are starting to use a multiarch setup where they use
@@ -3952,6 +4036,15 @@ static std::string getMultiarchTriple(const Driver &D,
static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
if (isMipsArch(Triple.getArch())) {
+ if (Triple.isAndroid()) {
+ StringRef CPUName;
+ StringRef ABIName;
+ tools::mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
+ if (CPUName == "mips32r6")
+ return "libr6";
+ if (CPUName == "mips32r2")
+ return "libr2";
+ }
// The lib32 directory has a special meaning on MIPS targets: it contains
// N32 ABI binaries. Use this folder only when producing code for the N32
// ABI.
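// Illustrative mapping of the Android MIPS addition above: a mips32r6 CPU
// selects "libr6", mips32r2 selects "libr2", and any other CPU falls
// through to the pre-existing lib32/N32 handling.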
@@ -3992,7 +4085,6 @@ static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
- CudaInstallation.init(Triple, Args);
Multilibs = GCCInstallation.getMultilibs();
llvm::Triple::ArchType Arch = Triple.getArch();
std::string SysRoot = computeSysRoot();
@@ -4010,9 +4102,9 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
GCCInstallation.getTriple().str() + "/bin")
.str());
- Distro Distro = DetectDistro(D, Arch);
+ Distro Distro(D.getVFS());
- if (IsOpenSUSE(Distro) || IsUbuntu(Distro)) {
+ if (Distro.IsOpenSUSE() || Distro.IsUbuntu()) {
ExtraOpts.push_back("-z");
ExtraOpts.push_back("relro");
}
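// Minimal usage sketch of the replacement API (the Distro class is
// introduced elsewhere in this patch; names assumed from the call sites
// here):
//   Distro D(getDriver().getVFS());
//   if (D.IsUbuntu() && D >= Distro::UbuntuMaverick)
//     ExtraOpts.push_back("--hash-style=gnu");
// The ordered comparison still relies on releases of one distribution
// staying contiguous in the underlying enumeration.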
@@ -4032,23 +4124,23 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// ABI requires a mapping between the GOT and the symbol table.
// Android loader does not support .gnu.hash.
if (!IsMips && !IsAndroid) {
- if (IsRedhat(Distro) || IsOpenSUSE(Distro) ||
- (IsUbuntu(Distro) && Distro >= UbuntuMaverick))
+ if (Distro.IsRedhat() || Distro.IsOpenSUSE() ||
+ (Distro.IsUbuntu() && Distro >= Distro::UbuntuMaverick))
ExtraOpts.push_back("--hash-style=gnu");
- if (IsDebian(Distro) || IsOpenSUSE(Distro) || Distro == UbuntuLucid ||
- Distro == UbuntuJaunty || Distro == UbuntuKarmic)
+ if (Distro.IsDebian() || Distro.IsOpenSUSE() || Distro == Distro::UbuntuLucid ||
+ Distro == Distro::UbuntuJaunty || Distro == Distro::UbuntuKarmic)
ExtraOpts.push_back("--hash-style=both");
}
- if (IsRedhat(Distro) && Distro != RHEL5 && Distro != RHEL6)
+ if (Distro.IsRedhat() && Distro != Distro::RHEL5 && Distro != Distro::RHEL6)
ExtraOpts.push_back("--no-add-needed");
#ifdef ENABLE_LINKER_BUILD_ID
ExtraOpts.push_back("--build-id");
#endif
- if (IsOpenSUSE(Distro))
+ if (Distro.IsOpenSUSE())
ExtraOpts.push_back("--enable-new-dtags");
// The selection of paths to try here is designed to match the patterns which
@@ -4214,23 +4306,32 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
const llvm::Triple::ArchType Arch = getArch();
const llvm::Triple &Triple = getTriple();
- const enum Distro Distro = DetectDistro(getDriver(), Arch);
+ const Distro Distro(getDriver().getVFS());
if (Triple.isAndroid())
return Triple.isArch64Bit() ? "/system/bin/linker64" : "/system/bin/linker";
- else if (Triple.isMusl()) {
+
+ if (Triple.isMusl()) {
std::string ArchName;
+ bool IsArm = false;
+
switch (Arch) {
+ case llvm::Triple::arm:
case llvm::Triple::thumb:
ArchName = "arm";
+ IsArm = true;
break;
+ case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
ArchName = "armeb";
+ IsArm = true;
break;
default:
ArchName = Triple.getArchName().str();
}
- if (Triple.getEnvironment() == llvm::Triple::MuslEABIHF)
+ if (IsArm &&
+ (Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
+ tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard))
ArchName += "hf";
return "/lib/ld-musl-" + ArchName + ".so.1";
@@ -4323,8 +4424,8 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
}
}
- if (Distro == Exherbo && (Triple.getVendor() == llvm::Triple::UnknownVendor ||
- Triple.getVendor() == llvm::Triple::PC))
+ if (Distro == Distro::Exherbo && (Triple.getVendor() == llvm::Triple::UnknownVendor ||
+ Triple.getVendor() == llvm::Triple::PC))
return "/usr/" + Triple.str() + "/lib/" + Loader;
return "/" + LibDir + "/" + Loader;
}
@@ -4517,33 +4618,27 @@ static std::string DetectLibcxxIncludePath(StringRef base) {
return MaxVersion ? (base + "/" + MaxVersionString).str() : "";
}
-void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
- ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
-
- // Check if libc++ has been enabled and provide its include paths if so.
- if (GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) {
- const std::string LibCXXIncludePathCandidates[] = {
- DetectLibcxxIncludePath(getDriver().Dir + "/../include/c++"),
- // If this is a development, non-installed, clang, libcxx will
- // not be found at ../include/c++ but it likely to be found at
- // one of the following two locations:
- DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/local/include/c++"),
- DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/include/c++") };
- for (const auto &IncludePath : LibCXXIncludePathCandidates) {
- if (IncludePath.empty() || !getVFS().exists(IncludePath))
- continue;
- // Add the first candidate that exists.
- addSystemInclude(DriverArgs, CC1Args, IncludePath);
- break;
- }
- return;
+std::string Linux::findLibCxxIncludePath() const {
+ const std::string LibCXXIncludePathCandidates[] = {
+ DetectLibcxxIncludePath(getDriver().Dir + "/../include/c++"),
+ // If this is a development, non-installed clang, libcxx will not be
+ // found at ../include/c++ but is likely to be found at one of the
+ // following two locations:
+ DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/local/include/c++"),
+ DetectLibcxxIncludePath(getDriver().SysRoot + "/usr/include/c++") };
+ for (const auto &IncludePath : LibCXXIncludePathCandidates) {
+ if (IncludePath.empty() || !getVFS().exists(IncludePath))
+ continue;
+ // Use the first candidate that exists.
+ return IncludePath;
}
+ return "";
+}
+void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
// We need a detected GCC installation on Linux to provide libstdc++'s
- // headers. We handled the libc++ case above.
+ // headers.
if (!GCCInstallation.isValid())
return;
@@ -4594,17 +4689,7 @@ void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
void Linux::AddCudaIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nocudainc))
- return;
-
- if (!CudaInstallation.isValid()) {
- getDriver().Diag(diag::err_drv_no_cuda_installation);
- return;
- }
-
- addSystemInclude(DriverArgs, CC1Args, CudaInstallation.getIncludePath());
- CC1Args.push_back("-include");
- CC1Args.push_back("__clang_cuda_runtime_wrapper.h");
+ CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
@@ -4641,7 +4726,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
Res |= SanitizerKind::Thread;
if (IsX86_64 || IsMIPS64 || IsPowerPC64 || IsAArch64)
Res |= SanitizerKind::Memory;
- if (IsX86_64)
+ if (IsX86_64 || IsMIPS64)
Res |= SanitizerKind::Efficiency;
if (IsX86 || IsX86_64) {
Res |= SanitizerKind::Function;
@@ -4661,6 +4746,99 @@ void Linux::addProfileRTLibs(const llvm::opt::ArgList &Args,
ToolChain::addProfileRTLibs(Args, CmdArgs);
}
+/// Fuchsia - Fuchsia tool chain which can call as(1) and ld(1) directly.
+
+Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ getFilePaths().push_back(D.SysRoot + "/lib");
+ getFilePaths().push_back(D.ResourceDir + "/lib/fuchsia");
+}
+
+Tool *Fuchsia::buildAssembler() const {
+ return new tools::gnutools::Assembler(*this);
+}
+
+Tool *Fuchsia::buildLinker() const {
+ return new tools::fuchsia::Linker(*this);
+}
+
+ToolChain::RuntimeLibType Fuchsia::GetRuntimeLibType(
+ const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "compiler-rt")
+ getDriver().Diag(diag::err_drv_invalid_rtlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::RLT_CompilerRT;
+}
+
+ToolChain::CXXStdlibType
+Fuchsia::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value != "libc++")
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libcxx;
+}
+
+void Fuchsia::addClangTargetOptions(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasFlag(options::OPT_fuse_init_array,
+ options::OPT_fno_use_init_array, true))
+ CC1Args.push_back("-fuse-init-array");
+}
+
+void Fuchsia::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "include");
+ addSystemInclude(DriverArgs, CC1Args, P);
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (StringRef dir : dirs) {
+ StringRef Prefix =
+ llvm::sys::path::is_absolute(dir) ? StringRef(D.SysRoot) : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + dir);
+ }
+ return;
+ }
+
+ addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/include");
+}
+
+std::string Fuchsia::findLibCxxIncludePath() const {
+ return getDriver().SysRoot + "/include/c++/v1";
+}
+
+void Fuchsia::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ (void) GetCXXStdlibType(Args);
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lunwind");
+}
+
/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
DragonFly::DragonFly(const Driver &D, const llvm::Triple &Triple,
@@ -4690,16 +4868,18 @@ Tool *DragonFly::buildLinker() const {
/// together object files from the assembler into a single blob.
CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
- : Linux(D, Triple, Args) {
+ const ToolChain &HostTC, const ArgList &Args)
+ : ToolChain(D, Triple, Args), HostTC(HostTC),
+ CudaInstallation(D, Triple, Args) {
if (CudaInstallation.isValid())
getProgramPaths().push_back(CudaInstallation.getBinPath());
}
-void
-CudaToolChain::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- Linux::addClangTargetOptions(DriverArgs, CC1Args);
+void CudaToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ HostTC.addClangTargetOptions(DriverArgs, CC1Args);
+
CC1Args.push_back("-fcuda-is-device");
if (DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
@@ -4713,18 +4893,23 @@ CudaToolChain::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
if (DriverArgs.hasArg(options::OPT_nocudalib))
return;
- std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(
- DriverArgs.getLastArgValue(options::OPT_march_EQ));
- if (!LibDeviceFile.empty()) {
- CC1Args.push_back("-mlink-cuda-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
-
- // Libdevice in CUDA-7.0 requires PTX version that's more recent
- // than LLVM defaults to. Use PTX4.2 which is the PTX version that
- // came with CUDA-7.0.
- CC1Args.push_back("-target-feature");
- CC1Args.push_back("+ptx42");
+ StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
+ assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
+ std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(GpuArch);
+
+ if (LibDeviceFile.empty()) {
+ getDriver().Diag(diag::err_drv_no_cuda_libdevice) << GpuArch;
+ return;
}
+
+ CC1Args.push_back("-mlink-cuda-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+
+ // Libdevice in CUDA-7.0 requires PTX version that's more recent
+ // than LLVM defaults to. Use PTX4.2 which is the PTX version that
+ // came with CUDA-7.0.
+ CC1Args.push_back("-target-feature");
+ CC1Args.push_back("+ptx42");
}
void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
@@ -4736,19 +4921,24 @@ void CudaToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
assert(!Arch.empty() && "Must have an explicit GPU arch.");
CudaInstallation.CheckCudaVersionSupportsArch(StringToCudaArch(Arch));
}
- Linux::AddCudaIncludeArgs(DriverArgs, CC1Args);
+ CudaInstallation.AddCudaIncludeArgs(DriverArgs, CC1Args);
}
llvm::opt::DerivedArgList *
CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
- const char *BoundArch) const {
- DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
+ DerivedArgList *DAL =
+ HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
+ if (!DAL)
+ DAL = new DerivedArgList(Args.getBaseArgs());
+
const OptTable &Opts = getDriver().getOpts();
for (Arg *A : Args) {
if (A->getOption().matches(options::OPT_Xarch__)) {
// Skip this argument unless the architecture matches BoundArch
- if (!BoundArch || A->getValue(0) != StringRef(BoundArch))
+ if (BoundArch.empty() || A->getValue(0) != BoundArch)
continue;
unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
@@ -4779,7 +4969,7 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
DAL->append(A);
}
- if (BoundArch) {
+ if (!BoundArch.empty()) {
DAL->eraseArg(options::OPT_march_EQ);
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), BoundArch);
}
@@ -4794,6 +4984,43 @@ Tool *CudaToolChain::buildLinker() const {
return new tools::NVPTX::Linker(*this);
}
+void CudaToolChain::addClangWarningOptions(ArgStringList &CC1Args) const {
+ HostTC.addClangWarningOptions(CC1Args);
+}
+
+ToolChain::CXXStdlibType
+CudaToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ return HostTC.GetCXXStdlibType(Args);
+}
+
+void CudaToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
+
+void CudaToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddClangCXXStdlibIncludeArgs(Args, CC1Args);
+}
+
+void CudaToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
+}
+
+SanitizerMask CudaToolChain::getSupportedSanitizers() const {
+ // The CudaToolChain only supports sanitizers in the sense that it allows
+ // sanitizer arguments on the command line if they are supported by the host
+ // toolchain. The CudaToolChain will actually ignore any command line
+ // arguments for any of these "supported" sanitizers. That means that no
+ // sanitization of device code is actually supported at this time.
+ //
+ // This behavior is necessary because the host and device toolchains
+ // invocations often share the command line, so the device toolchain must
+ // tolerate flags meant only for the host toolchain.
+ return HostTC.getSupportedSanitizers();
+}
+
/// XCore tool chain
XCoreToolChain::XCoreToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
@@ -4878,24 +5105,13 @@ MyriadToolChain::MyriadToolChain(const Driver &D, const llvm::Triple &Triple,
}
if (GCCInstallation.isValid()) {
- // The contents of LibDir are independent of the version of gcc.
- // This contains libc, libg (a superset of libc), libm, libstdc++, libssp.
- SmallString<128> LibDir(GCCInstallation.getParentLibPath());
- if (Triple.getArch() == llvm::Triple::sparcel)
- llvm::sys::path::append(LibDir, "../sparc-myriad-elf/lib/le");
- else
- llvm::sys::path::append(LibDir, "../sparc-myriad-elf/lib");
- addPathIfExists(D, LibDir, getFilePaths());
-
// This directory contains crt{i,n,begin,end}.o as well as libgcc.
// These files are tied to a particular version of gcc.
SmallString<128> CompilerSupportDir(GCCInstallation.getInstallPath());
- // There are actually 4 choices: {le,be} x {fpu,nofpu}
- // but as this toolchain is for LEON sparc, it can assume FPU.
- if (Triple.getArch() == llvm::Triple::sparcel)
- llvm::sys::path::append(CompilerSupportDir, "le");
addPathIfExists(D, CompilerSupportDir, getFilePaths());
}
+ // libstdc++ and libc++ must both be found in this one place.
+ addPathIfExists(D, D.Dir + "/../sparc-myriad-elf/lib", getFilePaths());
}
MyriadToolChain::~MyriadToolChain() {}
@@ -4906,18 +5122,18 @@ void MyriadToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
}
-void MyriadToolChain::AddClangCXXStdlibIncludeArgs(
- const ArgList &DriverArgs, ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
- return;
+std::string MyriadToolChain::findLibCxxIncludePath() const {
+ std::string Path(getDriver().getInstalledDir());
+ return Path + "/../include/c++/v1";
+}
- // Only libstdc++, for now.
+void MyriadToolChain::addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
StringRef LibDir = GCCInstallation.getParentLibPath();
const GCCVersion &Version = GCCInstallation.getVersion();
StringRef TripleStr = GCCInstallation.getTriple().str();
const Multilib &Multilib = GCCInstallation.getMultilib();
-
addLibStdCXXIncludePaths(
LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
"", TripleStr, "", "", Multilib.includeSuffix(), DriverArgs, CC1Args);
@@ -4948,6 +5164,10 @@ Tool *MyriadToolChain::buildLinker() const {
return new tools::Myriad::Linker(*this);
}
+SanitizerMask MyriadToolChain::getSupportedSanitizers() const {
+ return SanitizerKind::Address;
+}
+
WebAssembly::WebAssembly(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args)
: ToolChain(D, Triple, Args) {
@@ -4955,9 +5175,6 @@ WebAssembly::WebAssembly(const Driver &D, const llvm::Triple &Triple,
assert(Triple.isArch32Bit() != Triple.isArch64Bit());
getFilePaths().push_back(
getDriver().SysRoot + "/lib" + (Triple.isArch32Bit() ? "32" : "64"));
-
- // Use LLD by default.
- DefaultLinker = "lld";
}
bool WebAssembly::IsMathErrnoDefault() const { return false; }
@@ -5005,9 +5222,8 @@ void WebAssembly::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addSystemInclude(DriverArgs, CC1Args, getDriver().SysRoot + "/include");
}
-void WebAssembly::AddClangCXXStdlibIncludeArgs(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
+void WebAssembly::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
if (!DriverArgs.hasArg(options::OPT_nostdlibinc) &&
!DriverArgs.hasArg(options::OPT_nostdincxx))
addSystemInclude(DriverArgs, CC1Args,
@@ -5091,3 +5307,14 @@ SanitizerMask PS4CPU::getSupportedSanitizers() const {
Res |= SanitizerKind::Vptr;
return Res;
}
+
+Contiki::Contiki(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {}
+
+SanitizerMask Contiki::getSupportedSanitizers() const {
+ const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
+ SanitizerMask Res = ToolChain::getSupportedSanitizers();
+ if (IsX86)
+ Res |= SanitizerKind::SafeStack;
+ return Res;
+}
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
index 369712fa934b..7dab08915d48 100644
--- a/lib/Driver/ToolChains.h
+++ b/lib/Driver/ToolChains.h
@@ -16,7 +16,6 @@
#include "clang/Driver/Action.h"
#include "clang/Driver/Multilib.h"
#include "clang/Driver/ToolChain.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Compiler.h"
@@ -25,6 +24,60 @@
namespace clang {
namespace driver {
+
+/// A class to find a viable CUDA installation
+class CudaInstallationDetector {
+private:
+ const Driver &D;
+ bool IsValid = false;
+ CudaVersion Version = CudaVersion::UNKNOWN;
+ std::string InstallPath;
+ std::string BinPath;
+ std::string LibPath;
+ std::string LibDevicePath;
+ std::string IncludePath;
+ llvm::StringMap<std::string> LibDeviceMap;
+
+ // CUDA architectures for which we have raised an error in
+ // CheckCudaVersionSupportsArch.
+ mutable llvm::SmallSet<CudaArch, 4> ArchsWithVersionTooLowErrors;
+
+public:
+ CudaInstallationDetector(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
+ /// \brief Emit an error if Version does not support the given Arch.
+ ///
+ /// If either Version or Arch is unknown, does not emit an error. Emits at
+ /// most one error per Arch.
+ void CheckCudaVersionSupportsArch(CudaArch Arch) const;
+
+ /// \brief Check whether we detected a valid Cuda install.
+ bool isValid() const { return IsValid; }
+ /// \brief Print information about the detected CUDA installation.
+ void print(raw_ostream &OS) const;
+
+ /// \brief Get the detected Cuda install's version.
+ CudaVersion version() const { return Version; }
+ /// \brief Get the detected Cuda installation path.
+ StringRef getInstallPath() const { return InstallPath; }
+ /// \brief Get the detected path to Cuda's bin directory.
+ StringRef getBinPath() const { return BinPath; }
+ /// \brief Get the detected Cuda Include path.
+ StringRef getIncludePath() const { return IncludePath; }
+ /// \brief Get the detected Cuda library path.
+ StringRef getLibPath() const { return LibPath; }
+ /// \brief Get the detected Cuda device library path.
+ StringRef getLibDevicePath() const { return LibDevicePath; }
+  /// \brief Get the libdevice file for the given architecture.
+ std::string getLibDeviceFile(StringRef Gpu) const {
+ return LibDeviceMap.lookup(Gpu);
+ }
+};
+
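
With the detector hoisted to namespace scope and its old init() folded into the constructor, the accessors are usable immediately after construction. A minimal C++ sketch, assuming a Driver D, an llvm::Triple Triple, and an ArgList Args in scope (variable and GPU names are illustrative only):

    CudaInstallationDetector CudaInstallation(D, Triple, Args);
    if (CudaInstallation.isValid()) {
      // Dump the detected version and paths.
      CudaInstallation.print(llvm::errs());
      // Look up the libdevice bitcode library for a GPU arch; the result is
      // empty when the map has no entry for that arch.
      std::string LibDevice = CudaInstallation.getLibDeviceFile("sm_35");
    }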
namespace toolchains {
/// Generic_GCC - A tool chain using the 'gcc' command to perform
@@ -143,6 +196,11 @@ public:
SmallVectorImpl<StringRef> &BiarchLibDirs,
SmallVectorImpl<StringRef> &BiarchTripleAliases);
+ bool ScanGCCForMultilibs(const llvm::Triple &TargetTriple,
+ const llvm::opt::ArgList &Args,
+ StringRef Path,
+ bool NeedsBiarchSuffix = false);
+
void ScanLibDirForGCCTriple(const llvm::Triple &TargetArch,
const llvm::opt::ArgList &Args,
const std::string &LibDir,
@@ -154,61 +212,15 @@ public:
const std::string &LibDir,
StringRef CandidateTriple,
bool NeedsBiarchSuffix = false);
+
+ bool ScanGentooGccConfig(const llvm::Triple &TargetTriple,
+ const llvm::opt::ArgList &Args,
+ StringRef CandidateTriple,
+ bool NeedsBiarchSuffix = false);
};
protected:
GCCInstallationDetector GCCInstallation;
-
- // \brief A class to find a viable CUDA installation
- class CudaInstallationDetector {
- private:
- const Driver &D;
- bool IsValid = false;
- CudaVersion Version = CudaVersion::UNKNOWN;
- std::string InstallPath;
- std::string BinPath;
- std::string LibPath;
- std::string LibDevicePath;
- std::string IncludePath;
- llvm::StringMap<std::string> LibDeviceMap;
-
- // CUDA architectures for which we have raised an error in
- // CheckCudaVersionSupportsArch.
- mutable llvm::SmallSet<CudaArch, 4> ArchsWithVersionTooLowErrors;
-
- public:
- CudaInstallationDetector(const Driver &D) : D(D) {}
- void init(const llvm::Triple &TargetTriple, const llvm::opt::ArgList &Args);
-
- /// \brief Emit an error if Version does not support the given Arch.
- ///
- /// If either Version or Arch is unknown, does not emit an error. Emits at
- /// most one error per Arch.
- void CheckCudaVersionSupportsArch(CudaArch Arch) const;
-
- /// \brief Check whether we detected a valid Cuda install.
- bool isValid() const { return IsValid; }
- /// \brief Print information about the detected CUDA installation.
- void print(raw_ostream &OS) const;
-
- /// \brief Get the deteced Cuda install's version.
- CudaVersion version() const { return Version; }
- /// \brief Get the detected Cuda installation path.
- StringRef getInstallPath() const { return InstallPath; }
- /// \brief Get the detected path to Cuda's bin directory.
- StringRef getBinPath() const { return BinPath; }
- /// \brief Get the detected Cuda Include path.
- StringRef getIncludePath() const { return IncludePath; }
- /// \brief Get the detected Cuda library path.
- StringRef getLibPath() const { return LibPath; }
- /// \brief Get the detected Cuda device library path.
- StringRef getLibDevicePath() const { return LibDevicePath; }
- /// \brief Get libdevice file for given architecture
- std::string getLibDeviceFile(StringRef Gpu) const {
- return LibDeviceMap.lookup(Gpu);
- }
- };
-
CudaInstallationDetector CudaInstallation;
public:
@@ -223,6 +235,9 @@ public:
bool isPIEDefault() const override;
bool isPICDefaultForced() const override;
bool IsIntegratedAssemblerDefault() const override;
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
protected:
Tool *getTool(Action::ActionClass AC) const override;
@@ -238,6 +253,17 @@ protected:
/// \brief Check whether the target triple's architecture is 32-bits.
bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
+ // FIXME: This should be final, but the Solaris tool chain does weird
+ // things we can't easily represent.
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ virtual std::string findLibCxxIncludePath() const;
+ virtual void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+
bool addLibStdCXXIncludePaths(Twine Base, Twine Suffix, StringRef GCCTriple,
StringRef GCCMultiarchTriple,
StringRef TargetMultiarchTriple,
@@ -313,16 +339,13 @@ public:
/// @name ToolChain Implementation
/// {
- std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
- types::ID InputType) const override;
-
- types::ID LookupTypeForExtension(const char *Ext) const override;
+ types::ID LookupTypeForExtension(StringRef Ext) const override;
bool HasNativeLLVMSupport() const override;
llvm::opt::DerivedArgList *
- TranslateArgs(const llvm::opt::DerivedArgList &Args,
- const char *BoundArch) const override;
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
bool IsBlocksDefault() const override {
// Always allow blocks on Apple; users interested in versioning are
@@ -393,6 +416,8 @@ public:
/// The OS version we are targeting.
mutable VersionTuple TargetVersion;
+ CudaInstallationDetector CudaInstallation;
+
private:
void AddDeploymentTarget(llvm::opt::DerivedArgList &Args) const;
@@ -526,13 +551,16 @@ public:
bool isCrossCompiling() const override { return false; }
llvm::opt::DerivedArgList *
- TranslateArgs(const llvm::opt::DerivedArgList &Args,
- const char *BoundArch) const override;
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
CXXStdlibType GetDefaultCXXStdlibType() const override;
ObjCRuntime getDefaultObjCRuntime(bool isNonFragile) const override;
bool hasBlocksRuntime() const override;
+ void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
bool UseObjCMixedDispatch() const override {
// This is only used with the non-fragile ABI and non-legacy dispatch.
@@ -562,6 +590,8 @@ public:
bool SupportsEmbeddedBitcode() const override;
SanitizerMask getSupportedSanitizers() const override;
+
+ void printVerboseInfo(raw_ostream &OS) const override;
};
/// DarwinClang - The Darwin toolchain used by Clang.
@@ -573,6 +603,8 @@ public:
/// @name Apple ToolChain Implementation
/// {
+ RuntimeLibType GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
+
void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
@@ -587,7 +619,7 @@ public:
void AddLinkARCArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- unsigned GetDefaultDwarfVersion() const override { return 2; }
+ unsigned GetDefaultDwarfVersion() const override;
// Until dtrace (via CTF) and LLDB can deal with distributed debug info,
// Darwin defaults to standalone/full debug info.
bool GetDefaultStandaloneDebug() const override { return true; }
@@ -628,9 +660,7 @@ public:
GetCXXStdlibType(const llvm::opt::ArgList &Args) const override {
return ToolChain::CST_Libcxx;
}
- void AddClangCXXStdlibIncludeArgs(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ std::string findLibCxxIncludePath() const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
@@ -699,11 +729,14 @@ public:
Haiku(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
- bool isPIEDefault() const override { return getTriple().getArch() == llvm::Triple::x86_64; }
+ bool isPIEDefault() const override {
+ return getTriple().getArch() == llvm::Triple::x86_64;
+ }
- void
- AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ std::string findLibCxxIncludePath() const override;
+ void addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
};
class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_ELF {
@@ -734,7 +767,7 @@ public:
bool IsObjCNonFragileABIDefault() const override { return true; }
CXXStdlibType GetDefaultCXXStdlibType() const override;
- void AddClangCXXStdlibIncludeArgs(
+ void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
@@ -758,7 +791,7 @@ public:
bool IsObjCNonFragileABIDefault() const override { return true; }
CXXStdlibType GetDefaultCXXStdlibType() const override;
- void AddClangCXXStdlibIncludeArgs(
+ void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
@@ -787,9 +820,11 @@ public:
CXXStdlibType GetDefaultCXXStdlibType() const override;
- void AddClangCXXStdlibIncludeArgs(
+ std::string findLibCxxIncludePath() const override;
+ void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+
bool IsUnwindTablesDefault() const override { return true; }
protected:
@@ -829,7 +864,8 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void AddClangCXXStdlibIncludeArgs(
+ std::string findLibCxxIncludePath() const override;
+ void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
@@ -851,30 +887,45 @@ protected:
Tool *buildLinker() const override;
};
-class LLVM_LIBRARY_VISIBILITY CudaToolChain : public Linux {
+class LLVM_LIBRARY_VISIBILITY CudaToolChain : public ToolChain {
public:
CudaToolChain(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
+ const ToolChain &HostTC, const llvm::opt::ArgList &Args);
llvm::opt::DerivedArgList *
- TranslateArgs(const llvm::opt::DerivedArgList &Args,
- const char *BoundArch) const override;
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
// Never try to use the integrated assembler with CUDA; always fork out to
// ptxas.
bool useIntegratedAs() const override { return false; }
+ bool isCrossCompiling() const override { return true; }
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault() const override { return false; }
+ bool isPICDefaultForced() const override { return false; }
+ bool SupportsProfiling() const override { return false; }
+ bool SupportsObjCGC() const override { return false; }
void AddCudaIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- const Generic_GCC::CudaInstallationDetector &cudaInstallation() const {
- return CudaInstallation;
- }
- Generic_GCC::CudaInstallationDetector &cudaInstallation() {
- return CudaInstallation;
- }
+ void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ const ToolChain &HostTC;
+ CudaInstallationDetector CudaInstallation;
protected:
Tool *buildAssembler() const override; // ptxas
@@ -895,9 +946,7 @@ public:
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
- void AddClangCXXStdlibIncludeArgs(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ std::string findLibCxxIncludePath() const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
@@ -912,6 +961,10 @@ public:
: RuntimeLibType::RLT_CompilerRT;
}
+ const char *getDefaultLinker() const override {
+ return "lld";
+ }
+
private:
Multilib SelectedMultilib;
std::string LibSuffix;
@@ -922,6 +975,13 @@ public:
LanaiToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args)
: Generic_ELF(D, Triple, Args) {}
+
+ // No support for finding a C++ standard library yet.
+ std::string findLibCxxIncludePath() const override { return ""; }
+ void addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
+
bool IsIntegratedAssemblerDefault() const override { return true; }
};
@@ -939,7 +999,7 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void AddClangCXXStdlibIncludeArgs(
+ void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
@@ -981,9 +1041,7 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void AddClangCXXStdlibIncludeArgs(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ std::string findLibCxxIncludePath() const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
@@ -1010,6 +1068,41 @@ private:
std::string NaClArmMacrosPath;
};
+class LLVM_LIBRARY_VISIBILITY Fuchsia : public Generic_ELF {
+public:
+ Fuchsia(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ bool isPIEDefault() const override { return true; }
+ bool HasNativeLLVMSupport() const override { return true; }
+ bool IsIntegratedAssemblerDefault() const override { return true; }
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::GDB;
+ }
+
+ RuntimeLibType
+ GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
+ CXXStdlibType
+ GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ std::string findLibCxxIncludePath() const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
+ const char *getDefaultLinker() const override {
+ return "lld";
+ }
+
+protected:
+ Tool *buildAssembler() const override;
+ Tool *buildLinker() const override;
+};
+
/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
class LLVM_LIBRARY_VISIBILITY TCEToolChain : public ToolChain {
@@ -1024,14 +1117,22 @@ public:
bool isPICDefaultForced() const override;
};
+/// Toolchain for little endian TCE cores.
+class LLVM_LIBRARY_VISIBILITY TCELEToolChain : public TCEToolChain {
+public:
+ TCELEToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ ~TCELEToolChain() override;
+};
+
class LLVM_LIBRARY_VISIBILITY MSVCToolChain : public ToolChain {
public:
MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
llvm::opt::DerivedArgList *
- TranslateArgs(const llvm::opt::DerivedArgList &Args,
- const char *BoundArch) const override;
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
bool IsIntegratedAssemblerDefault() const override;
bool IsUnwindTablesDefault() const override;
@@ -1057,7 +1158,9 @@ public:
bool getVisualStudioInstallDir(std::string &path) const;
bool getVisualStudioBinariesFolder(const char *clangProgramPath,
std::string &path) const;
- VersionTuple getMSVCVersionFromExe() const override;
+ VersionTuple
+ computeMSVCVersion(const Driver *D,
+ const llvm::opt::ArgList &Args) const override;
std::string ComputeEffectiveClangTriple(const llvm::opt::ArgList &Args,
types::ID InputType) const override;
@@ -1073,6 +1176,9 @@ protected:
Tool *buildLinker() const override;
Tool *buildAssembler() const override;
+private:
+ VersionTuple getMSVCVersionFromTriple() const;
+ VersionTuple getMSVCVersionFromExe() const;
};
class LLVM_LIBRARY_VISIBILITY CrossWindowsToolChain : public Generic_GCC {
@@ -1144,11 +1250,13 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void AddClangCXXStdlibIncludeArgs(
+ std::string findLibCxxIncludePath() const override;
+ void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
Tool *SelectTool(const JobAction &JA) const override;
unsigned GetDefaultDwarfVersion() const override { return 2; }
+ SanitizerMask getSupportedSanitizers() const override;
protected:
Tool *buildLinker() const override;
@@ -1189,6 +1297,10 @@ private:
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ const char *getDefaultLinker() const override {
+ return "lld";
+ }
+
Tool *buildLinker() const override;
};
@@ -1197,6 +1309,12 @@ public:
PS4CPU(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ // No support for finding a C++ standard library yet.
+ std::string findLibCxxIncludePath() const override { return ""; }
+ void addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
+
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
bool HasNativeLLVMSupport() const override;
@@ -1217,6 +1335,20 @@ protected:
Tool *buildLinker() const override;
};
+class LLVM_LIBRARY_VISIBILITY Contiki : public Generic_ELF {
+public:
+ Contiki(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ // No support for finding a C++ standard library yet.
+ std::string findLibCxxIncludePath() const override { return ""; }
+ void addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
+
+ SanitizerMask getSupportedSanitizers() const override;
+};
+
} // end namespace toolchains
} // end namespace driver
} // end namespace clang
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 270ed0a4e756..2a367bb29aa5 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -40,8 +40,9 @@
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
#include "llvm/Support/Program.h"
-#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/YAMLParser.h"
#ifdef LLVM_ON_UNIX
#include <unistd.h> // For getuid().
@@ -53,7 +54,7 @@ using namespace clang;
using namespace llvm::opt;
static void handleTargetFeaturesGroup(const ArgList &Args,
- std::vector<const char *> &Features,
+ std::vector<StringRef> &Features,
OptSpecifier Group) {
for (const Arg *A : Args.filtered(Group)) {
StringRef Name = A->getOption().getName();
@@ -108,8 +109,6 @@ static const char *getSparcAsmModeForCPU(StringRef Name,
}
}
-/// CheckPreprocessingOptions - Perform some validation of preprocessing
-/// arguments that is shared with gcc.
static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) {
if (Arg *A = Args.getLastArg(options::OPT_C, options::OPT_CC)) {
if (!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_P) &&
@@ -121,8 +120,6 @@ static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) {
}
}
-/// CheckCodeGenerationOptions - Perform some validation of code generation
-/// arguments that is shared with gcc.
static void CheckCodeGenerationOptions(const Driver &D, const ArgList &Args) {
// In gcc, only ARM checks this, but it seems reasonable to check universally.
if (Args.hasArg(options::OPT_static))
@@ -233,7 +230,8 @@ static void addDirectoryList(const ArgList &Args, ArgStringList &CmdArgs,
}
static void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
- const ArgList &Args, ArgStringList &CmdArgs) {
+ const ArgList &Args, ArgStringList &CmdArgs,
+ const JobAction &JA) {
const Driver &D = TC.getDriver();
// Add extra linker input arguments which are not treated as inputs
@@ -241,6 +239,14 @@ static void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
Args.AddAllArgValues(CmdArgs, options::OPT_Zlinker_input);
for (const auto &II : Inputs) {
+ // If the current tool chain refers to an OpenMP offloading host, we should
+ // ignore inputs that refer to OpenMP offloading devices - they will be
+ // embedded according to a proper linker script.
+ if (auto *IA = II.getAction())
+ if (JA.isHostOffloading(Action::OFK_OpenMP) &&
+ IA->isDeviceOffloading(Action::OFK_OpenMP))
+ continue;
+
if (!TC.HasNativeLLVMSupport() && types::isLLVMIR(II.getType()))
// Don't try to pass LLVM inputs unless we have native support.
D.Diag(diag::err_drv_no_linker_llvm_support) << TC.getTripleString();
@@ -274,6 +280,131 @@ static void AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
}
+/// Add OpenMP linker script arguments at the end of the argument list so that
+/// the fat binary is built by embedding each of the device images into the
+/// host. The linker script also defines a few symbols required by the code
+/// generation so that the images can be easily retrieved at runtime by the
+/// offloading library. This should be used only in tool chains that support
+/// linker scripts.
+static void AddOpenMPLinkerScript(const ToolChain &TC, Compilation &C,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ const JobAction &JA) {
+
+ // If this is not an OpenMP host toolchain, we don't need to do anything.
+ if (!JA.isHostOffloading(Action::OFK_OpenMP))
+ return;
+
+ // Create temporary linker script. Keep it if save-temps is enabled.
+ const char *LKS;
+ SmallString<256> Name = llvm::sys::path::filename(Output.getFilename());
+ if (C.getDriver().isSaveTempsEnabled()) {
+ llvm::sys::path::replace_extension(Name, "lk");
+ LKS = C.getArgs().MakeArgString(Name.c_str());
+ } else {
+ llvm::sys::path::replace_extension(Name, "");
+ Name = C.getDriver().GetTemporaryPath(Name, "lk");
+ LKS = C.addTempFile(C.getArgs().MakeArgString(Name.c_str()));
+ }
+
+ // Add linker script option to the command.
+ CmdArgs.push_back("-T");
+ CmdArgs.push_back(LKS);
+
+ // Create a buffer to write the contents of the linker script.
+ std::string LksBuffer;
+ llvm::raw_string_ostream LksStream(LksBuffer);
+
+ // Get the OpenMP offload tool chains so that we can extract the triple
+ // associated with each device input.
+ auto OpenMPToolChains = C.getOffloadToolChains<Action::OFK_OpenMP>();
+ assert(OpenMPToolChains.first != OpenMPToolChains.second &&
+ "No OpenMP toolchains??");
+
+ // Track the input file name and device triple in order to build the script,
+ // inserting binaries in the designated sections.
+ SmallVector<std::pair<std::string, const char *>, 8> InputBinaryInfo;
+
+ // Add commands to embed target binaries. We ensure that each section and
+  // image is 16-byte aligned. This is not mandatory, but it increases the
+  // likelihood that the data is aligned with a cache line on common host
+  // machines.
+ LksStream << "/*\n";
+ LksStream << " OpenMP Offload Linker Script\n";
+ LksStream << " *** Automatically generated by Clang ***\n";
+ LksStream << "*/\n";
+ LksStream << "TARGET(binary)\n";
+ auto DTC = OpenMPToolChains.first;
+ for (auto &II : Inputs) {
+ const Action *A = II.getAction();
+ // Is this a device linking action?
+ if (A && isa<LinkJobAction>(A) &&
+ A->isDeviceOffloading(Action::OFK_OpenMP)) {
+ assert(DTC != OpenMPToolChains.second &&
+ "More device inputs than device toolchains??");
+ InputBinaryInfo.push_back(std::make_pair(
+ DTC->second->getTriple().normalize(), II.getFilename()));
+ ++DTC;
+ LksStream << "INPUT(" << II.getFilename() << ")\n";
+ }
+ }
+
+ assert(DTC == OpenMPToolChains.second &&
+         "Fewer device inputs than device toolchains??");
+
+ LksStream << "SECTIONS\n";
+ LksStream << "{\n";
+ LksStream << " .omp_offloading :\n";
+ LksStream << " ALIGN(0x10)\n";
+ LksStream << " {\n";
+
+ for (auto &BI : InputBinaryInfo) {
+ LksStream << " . = ALIGN(0x10);\n";
+ LksStream << " PROVIDE_HIDDEN(.omp_offloading.img_start." << BI.first
+ << " = .);\n";
+ LksStream << " " << BI.second << "\n";
+ LksStream << " PROVIDE_HIDDEN(.omp_offloading.img_end." << BI.first
+ << " = .);\n";
+ }
+
+ LksStream << " }\n";
+  // Add commands to define the begin and end symbols for the host entries.
+  // We use 1-byte subalignment so that the linker does not add any padding
+  // and the elements in this section form an array.
+ LksStream << " .omp_offloading.entries :\n";
+ LksStream << " ALIGN(0x10)\n";
+ LksStream << " SUBALIGN(0x01)\n";
+ LksStream << " {\n";
+ LksStream << " PROVIDE_HIDDEN(.omp_offloading.entries_begin = .);\n";
+ LksStream << " *(.omp_offloading.entries)\n";
+ LksStream << " PROVIDE_HIDDEN(.omp_offloading.entries_end = .);\n";
+ LksStream << " }\n";
+ LksStream << "}\n";
+ LksStream << "INSERT BEFORE .data\n";
+ LksStream.flush();
+
+  // Dump the contents of the linker script if the user requested it. This
+  // option exists to enable testing of behavior with -###.
+ if (C.getArgs().hasArg(options::OPT_fopenmp_dump_offload_linker_script))
+ llvm::errs() << LksBuffer;
+
+ // If this is a dry run, do not create the linker script file.
+ if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH))
+ return;
+
+ // Open script file and write the contents.
+ std::error_code EC;
+ llvm::raw_fd_ostream Lksf(LKS, EC, llvm::sys::fs::F_None);
+
+ if (EC) {
+ C.getDriver().Diag(clang::diag::err_unable_to_make_temp) << EC.message();
+ return;
+ }
+
+ Lksf << LksBuffer;
+}
+
/// \brief Determine whether Objective-C automated reference counting is
/// enabled.
static bool isObjCAutoRefCount(const ArgList &Args) {
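
To make the stream writes above concrete: for one device link input, say a.out-openmp-device (a hypothetical file name) produced by a device tool chain whose normalized triple is x86_64-pc-linux-gnu, the generated script would look roughly like this:

    /*
      OpenMP Offload Linker Script
      *** Automatically generated by Clang ***
    */
    TARGET(binary)
    INPUT(a.out-openmp-device)
    SECTIONS
    {
      .omp_offloading :
      ALIGN(0x10)
      {
        . = ALIGN(0x10);
        PROVIDE_HIDDEN(.omp_offloading.img_start.x86_64-pc-linux-gnu = .);
        a.out-openmp-device
        PROVIDE_HIDDEN(.omp_offloading.img_end.x86_64-pc-linux-gnu = .);
      }
      .omp_offloading.entries :
      ALIGN(0x10)
      SUBALIGN(0x01)
      {
        PROVIDE_HIDDEN(.omp_offloading.entries_begin = .);
        *(.omp_offloading.entries)
        PROVIDE_HIDDEN(.omp_offloading.entries_end = .);
      }
    }
    INSERT BEFORE .data

The script is then handed to the linker through the -T option added above.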
@@ -296,38 +427,25 @@ static bool forwardToGCC(const Option &O) {
!O.hasFlag(options::DriverOption) && !O.hasFlag(options::LinkerInput);
}
-/// Add the C++ include args of other offloading toolchains. If this is a host
-/// job, the device toolchains are added. If this is a device job, the host
-/// toolchains will be added.
-static void addExtraOffloadCXXStdlibIncludeArgs(Compilation &C,
- const JobAction &JA,
- const ArgList &Args,
- ArgStringList &CmdArgs) {
-
- if (JA.isHostOffloading(Action::OFK_Cuda))
- C.getSingleOffloadToolChain<Action::OFK_Cuda>()
- ->AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
- else if (JA.isDeviceOffloading(Action::OFK_Cuda))
- C.getSingleOffloadToolChain<Action::OFK_Host>()
- ->AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
-
- // TODO: Add support for other programming models here.
-}
-
-/// Add the include args that are specific of each offloading programming model.
-static void addExtraOffloadSpecificIncludeArgs(Compilation &C,
- const JobAction &JA,
- const ArgList &Args,
- ArgStringList &CmdArgs) {
-
+/// Apply \a Work on the current tool chain \a RegularToolChain and any other
+/// offloading tool chain that is associated with the current action \a JA.
+static void
+forAllAssociatedToolChains(Compilation &C, const JobAction &JA,
+ const ToolChain &RegularToolChain,
+ llvm::function_ref<void(const ToolChain &)> Work) {
+ // Apply Work on the current/regular tool chain.
+ Work(RegularToolChain);
+
+ // Apply Work on all the offloading tool chains associated with the current
+ // action.
if (JA.isHostOffloading(Action::OFK_Cuda))
- C.getSingleOffloadToolChain<Action::OFK_Host>()->AddCudaIncludeArgs(
- Args, CmdArgs);
+ Work(*C.getSingleOffloadToolChain<Action::OFK_Cuda>());
else if (JA.isDeviceOffloading(Action::OFK_Cuda))
- C.getSingleOffloadToolChain<Action::OFK_Cuda>()->AddCudaIncludeArgs(
- Args, CmdArgs);
+ Work(*C.getSingleOffloadToolChain<Action::OFK_Host>());
- // TODO: Add support for other programming models here.
+ //
+ // TODO: Add support for other offloading programming models here.
+ //
}
void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
@@ -423,6 +541,13 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
}
}
+ // Add offload include arguments specific for CUDA. This must happen before
+ // we -I or -include anything else, because we must pick up the CUDA headers
+ // from the particular CUDA installation, rather than from e.g.
+ // /usr/local/include.
+ if (JA.isOffloading(Action::OFK_Cuda))
+ getToolChain().AddCudaIncludeArgs(Args, CmdArgs);
+
// Add -i* options, and automatically translate to
// -include-pch/-include-pth for transparent PCH support. It's
// wonky, but we include looking for .gch so we can support seamless
@@ -604,22 +729,22 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// of an offloading programming model.
// Add C++ include arguments, if needed.
- if (types::isCXX(Inputs[0].getType())) {
- getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
- addExtraOffloadCXXStdlibIncludeArgs(C, JA, Args, CmdArgs);
- }
+ if (types::isCXX(Inputs[0].getType()))
+ forAllAssociatedToolChains(C, JA, getToolChain(),
+ [&Args, &CmdArgs](const ToolChain &TC) {
+ TC.AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+ });
// Add system include arguments for all targets but IAMCU.
- if (!IsIAMCU) {
- getToolChain().AddClangSystemIncludeArgs(Args, CmdArgs);
- addExtraOffloadCXXStdlibIncludeArgs(C, JA, Args, CmdArgs);
- } else {
+ if (!IsIAMCU)
+ forAllAssociatedToolChains(C, JA, getToolChain(),
+ [&Args, &CmdArgs](const ToolChain &TC) {
+ TC.AddClangSystemIncludeArgs(Args, CmdArgs);
+ });
+ else {
// For IAMCU add special include arguments.
getToolChain().AddIAMCUIncludeArgs(Args, CmdArgs);
}
-
- // Add offload include arguments, if needed.
- addExtraOffloadSpecificIncludeArgs(C, JA, Args, CmdArgs);
}
// FIXME: Move to target hook.
@@ -703,7 +828,7 @@ static void getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
// FIXME: Use ARMTargetParser.
static void getARMHWDivFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef HWDiv,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
unsigned HWDivID = llvm::ARM::parseHWDiv(HWDiv);
if (!llvm::ARM::getHWDivFeatures(HWDivID, Features))
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
@@ -712,7 +837,7 @@ static void getARMHWDivFeatures(const Driver &D, const Arg *A,
// Handle -mfpu=.
static void getARMFPUFeatures(const Driver &D, const Arg *A,
const ArgList &Args, StringRef FPU,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
unsigned FPUID = llvm::ARM::parseFPU(FPU);
if (!llvm::ARM::getFPUFeatures(FPUID, Features))
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
@@ -720,13 +845,13 @@ static void getARMFPUFeatures(const Driver &D, const Arg *A,
// Decode ARM features from string like +[no]featureA+[no]featureB+...
static bool DecodeARMFeatures(const Driver &D, StringRef text,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
- const char *FeatureName = llvm::ARM::getArchExtFeature(Feature);
- if (FeatureName)
+ StringRef FeatureName = llvm::ARM::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
Features.push_back(FeatureName);
else
return false;
@@ -739,7 +864,7 @@ static bool DecodeARMFeatures(const Driver &D, StringRef text,
// to handle -march=native correctly.
static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef ArchName,
- std::vector<const char *> &Features,
+ std::vector<StringRef> &Features,
const llvm::Triple &Triple) {
std::pair<StringRef, StringRef> Split = ArchName.split("+");
@@ -752,7 +877,7 @@ static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
// Check -mcpu=. Needs ArchName to handle -mcpu=generic.
static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
llvm::StringRef CPUName, llvm::StringRef ArchName,
- std::vector<const char *> &Features,
+ std::vector<StringRef> &Features,
const llvm::Triple &Triple) {
std::pair<StringRef, StringRef> Split = CPUName.split("+");
@@ -773,7 +898,7 @@ static bool useAAPCSForMachO(const llvm::Triple &T) {
// -mfloat-abi=.
arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
const Driver &D = TC.getDriver();
- const llvm::Triple Triple(TC.ComputeEffectiveClangTriple(Args));
+ const llvm::Triple &Triple = TC.getEffectiveTriple();
auto SubArch = getARMSubArchVersionNumber(Triple);
arm::FloatABI ABI = FloatABI::Invalid;
if (Arg *A =
@@ -876,7 +1001,8 @@ arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
static void getARMTargetFeatures(const ToolChain &TC,
const llvm::Triple &Triple,
const ArgList &Args,
- std::vector<const char *> &Features,
+ ArgStringList &CmdArgs,
+ std::vector<StringRef> &Features,
bool ForAS) {
const Driver &D = TC.getDriver();
@@ -1014,6 +1140,29 @@ static void getARMTargetFeatures(const ToolChain &TC,
Features.push_back("+long-calls");
}
+  // Generate execute-only output (no data access to code sections).
+  // Supported only on ARMv6T2 and ARMv7 and above.
+  // Cannot be combined with -mno-movt or -mlong-calls.
+  if (Arg *A = Args.getLastArg(options::OPT_mexecute_only,
+                               options::OPT_mno_execute_only)) {
+    if (A->getOption().matches(options::OPT_mexecute_only)) {
+      if (getARMSubArchVersionNumber(Triple) < 7 &&
+          llvm::ARM::parseArch(Triple.getArchName()) != llvm::ARM::AK_ARMV6T2)
+        D.Diag(diag::err_target_unsupported_execute_only)
+            << Triple.getArchName();
+      else if (Arg *B = Args.getLastArg(options::OPT_mno_movt))
+        D.Diag(diag::err_opt_not_valid_with_opt)
+            << A->getAsString(Args) << B->getAsString(Args);
+      // Long calls create constant pool entries and have not yet been fixed
+      // up to play nicely with execute-only. Hence, they cannot be used in
+      // execute-only code for now.
+      else if (Arg *B = Args.getLastArg(options::OPT_mlong_calls,
+                                        options::OPT_mno_long_calls)) {
+        if (B->getOption().matches(options::OPT_mlong_calls))
+          D.Diag(diag::err_opt_not_valid_with_opt)
+              << A->getAsString(Args) << B->getAsString(Args);
+      }
+
+      CmdArgs.push_back("-backend-option");
+      CmdArgs.push_back("-arm-execute-only");
+    }
+  }
+
// Kernel code has more strict alignment requirements.
if (KernelOrKext)
Features.push_back("+strict-align");
@@ -1146,9 +1295,9 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
// ARM tools end.
/// getAArch64TargetCPU - Get the (LLVM) name of the AArch64 cpu we are
-/// targeting.
-static std::string getAArch64TargetCPU(const ArgList &Args) {
- Arg *A;
+/// targeting. Set \p A to the Arg corresponding to the -mcpu or -mtune
+/// arguments if they are provided, or to nullptr otherwise.
+static std::string getAArch64TargetCPU(const ArgList &Args, Arg *&A) {
std::string CPU;
// If we have -mtune or -mcpu, use that.
if ((A = Args.getLastArg(options::OPT_mtune_EQ))) {
@@ -1174,8 +1323,7 @@ static std::string getAArch64TargetCPU(const ArgList &Args) {
void Clang::AddAArch64TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
- llvm::Triple Triple(TripleStr);
+ const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
if (!Args.hasFlag(options::OPT_mred_zone, options::OPT_mno_red_zone, true) ||
Args.hasArg(options::OPT_mkernel) ||
@@ -1373,7 +1521,7 @@ static mips::FloatABI getMipsFloatABI(const Driver &D, const ArgList &Args) {
}
static void AddTargetFeature(const ArgList &Args,
- std::vector<const char *> &Features,
+ std::vector<StringRef> &Features,
OptSpecifier OnOpt, OptSpecifier OffOpt,
StringRef FeatureName) {
if (Arg *A = Args.getLastArg(OnOpt, OffOpt)) {
@@ -1386,7 +1534,7 @@ static void AddTargetFeature(const ArgList &Args,
static void getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
StringRef CPUName;
StringRef ABIName;
mips::getMipsCPUAndABI(Args, Triple, CPUName, ABIName);
@@ -1597,19 +1745,12 @@ static std::string getPPCTargetCPU(const ArgList &Args) {
static void getPPCTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
handleTargetFeaturesGroup(Args, Features, options::OPT_m_ppc_Features_Group);
ppc::FloatABI FloatABI = ppc::getPPCFloatABI(D, Args);
- if (FloatABI == ppc::FloatABI::Soft &&
- !(Triple.getArch() == llvm::Triple::ppc64 ||
- Triple.getArch() == llvm::Triple::ppc64le))
- Features.push_back("+soft-float");
- else if (FloatABI == ppc::FloatABI::Soft &&
- (Triple.getArch() == llvm::Triple::ppc64 ||
- Triple.getArch() == llvm::Triple::ppc64le))
- D.Diag(diag::err_drv_invalid_mfloat_abi)
- << "soft float is not supported for ppc64";
+ if (FloatABI == ppc::FloatABI::Soft)
+ Features.push_back("-hard-float");
// Altivec is a bit weird, allow overriding of the Altivec feature here.
AddTargetFeature(Args, Features, options::OPT_faltivec,
@@ -1767,7 +1908,7 @@ sparc::FloatABI sparc::getSparcFloatABI(const Driver &D,
}
static void getSparcTargetFeatures(const Driver &D, const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
sparc::FloatABI FloatABI = sparc::getSparcFloatABI(D, Args);
if (FloatABI == sparc::FloatABI::Soft)
Features.push_back("+soft-float");
@@ -1804,7 +1945,7 @@ static const char *getSystemZTargetCPU(const ArgList &Args) {
}
static void getSystemZTargetFeatures(const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
// -m(no-)htm overrides use of the transactional-execution facility.
if (Arg *A = Args.getLastArg(options::OPT_mhtm, options::OPT_mno_htm)) {
if (A->getOption().matches(options::OPT_mhtm))
@@ -1875,6 +2016,11 @@ static const char *getX86TargetCPU(const ArgList &Args,
if (Triple.isOSDarwin()) {
if (Triple.getArchName() == "x86_64h")
return "core-avx2";
+    // macosx10.12 drops support for all pre-Penryn Macs.
+    // Simulators (and Xcode itself) can still run on 10.11, though.
+ if (Triple.isMacOSX() && !Triple.isOSVersionLT(10, 12))
+ return "penryn";
+ // The oldest x86_64 Macs have core2/Merom; the oldest x86 Macs have Yonah.
return Is64Bit ? "core2" : "yonah";
}
@@ -1926,13 +2072,15 @@ static StringRef getWebAssemblyTargetCPU(const ArgList &Args) {
static std::string getCPUName(const ArgList &Args, const llvm::Triple &T,
bool FromAs = false) {
+ Arg *A;
+
switch (T.getArch()) {
default:
return "";
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- return getAArch64TargetCPU(Args);
+ return getAArch64TargetCPU(Args, A);
case llvm::Triple::arm:
case llvm::Triple::armeb:
@@ -2007,8 +2155,27 @@ static std::string getCPUName(const ArgList &Args, const llvm::Triple &T,
}
}
+static unsigned getLTOParallelism(const ArgList &Args, const Driver &D) {
+ unsigned Parallelism = 0;
+ Arg *LtoJobsArg = Args.getLastArg(options::OPT_flto_jobs_EQ);
+ if (LtoJobsArg &&
+ StringRef(LtoJobsArg->getValue()).getAsInteger(10, Parallelism))
+ D.Diag(diag::err_drv_invalid_int_value) << LtoJobsArg->getAsString(Args)
+ << LtoJobsArg->getValue();
+ return Parallelism;
+}
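
Note that StringRef::getAsInteger returns true on failure, so the branch above fires exactly for non-numeric values. A minimal C++ illustration of the semantics:

    unsigned N = 0;
    bool Failed = llvm::StringRef("4").getAsInteger(10, N);   // false, N == 4
    bool Bad = llvm::StringRef("all").getAsInteger(10, N);    // true

Thus -flto-jobs=4 yields a parallelism of 4, while -flto-jobs=all is diagnosed with err_drv_invalid_int_value.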
+
+// CloudABI and WebAssembly use -ffunction-sections and -fdata-sections by
+// default.
+static bool isUseSeparateSections(const llvm::Triple &Triple) {
+ return Triple.getOS() == llvm::Triple::CloudABI ||
+ Triple.getArch() == llvm::Triple::wasm32 ||
+ Triple.getArch() == llvm::Triple::wasm64;
+}
+
static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
- ArgStringList &CmdArgs, bool IsThinLTO) {
+ ArgStringList &CmdArgs, bool IsThinLTO,
+ const Driver &D) {
// Tell the linker to load the plugin. This has to come before AddLinkerInputs
// as gold requires -plugin to come before any -plugin-opt that -Wl might
// forward.
@@ -2041,6 +2208,10 @@ static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
if (IsThinLTO)
CmdArgs.push_back("-plugin-opt=thinlto");
+ if (unsigned Parallelism = getLTOParallelism(Args, D))
+ CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-opt=jobs=") +
+ llvm::to_string(Parallelism)));
+
// If an explicit debugger tuning argument appeared, pass it along.
if (Arg *A = Args.getLastArg(options::OPT_gTune_Group,
options::OPT_ggdbN_Group)) {
@@ -2051,6 +2222,19 @@ static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
else
CmdArgs.push_back("-plugin-opt=-debugger-tune=gdb");
}
+
+ bool UseSeparateSections =
+ isUseSeparateSections(ToolChain.getEffectiveTriple());
+
+ if (Args.hasFlag(options::OPT_ffunction_sections,
+ options::OPT_fno_function_sections, UseSeparateSections)) {
+ CmdArgs.push_back("-plugin-opt=-function-sections");
+ }
+
+ if (Args.hasFlag(options::OPT_fdata_sections, options::OPT_fno_data_sections,
+ UseSeparateSections)) {
+ CmdArgs.push_back("-plugin-opt=-data-sections");
+ }
}
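
Combined with isUseSeparateSections above, an LTO link targeting CloudABI or WebAssembly now passes the section-splitting defaults to the gold plugin with no explicit flags, roughly:

    -plugin-opt=-function-sections -plugin-opt=-data-sections

and -fno-function-sections / -fno-data-sections still turn them back off.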
/// This is a helper function for validating the optional refinement step
@@ -2186,7 +2370,7 @@ static void ParseMRecip(const Driver &D, const ArgList &Args,
static void getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
// If -march=native, autodetect the feature list.
if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
if (StringRef(A->getValue()) == "native") {
@@ -2345,29 +2529,14 @@ void Clang::AddWebAssemblyTargetArgs(const ArgList &Args,
// Decode AArch64 features from string like +[no]featureA+[no]featureB+...
static bool DecodeAArch64Features(const Driver &D, StringRef text,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
SmallVector<StringRef, 8> Split;
text.split(Split, StringRef("+"), -1, false);
for (StringRef Feature : Split) {
- const char *result = llvm::StringSwitch<const char *>(Feature)
- .Case("fp", "+fp-armv8")
- .Case("simd", "+neon")
- .Case("crc", "+crc")
- .Case("crypto", "+crypto")
- .Case("fp16", "+fullfp16")
- .Case("profile", "+spe")
- .Case("ras", "+ras")
- .Case("nofp", "-fp-armv8")
- .Case("nosimd", "-neon")
- .Case("nocrc", "-crc")
- .Case("nocrypto", "-crypto")
- .Case("nofp16", "-fullfp16")
- .Case("noprofile", "-spe")
- .Case("noras", "-ras")
- .Default(nullptr);
- if (result)
- Features.push_back(result);
+ StringRef FeatureName = llvm::AArch64::getArchExtFeature(Feature);
+ if (!FeatureName.empty())
+ Features.push_back(FeatureName);
else if (Feature == "neon" || Feature == "noneon")
D.Diag(diag::err_drv_no_neon_modifier);
else
@@ -2379,23 +2548,21 @@ static bool DecodeAArch64Features(const Driver &D, StringRef text,
// Check if the CPU name and feature modifiers in -mcpu are legal. If yes,
// decode CPU and feature.
static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
std::pair<StringRef, StringRef> Split = Mcpu.split("+");
CPU = Split.first;
- if (CPU == "cortex-a53" || CPU == "cortex-a57" ||
- CPU == "cortex-a72" || CPU == "cortex-a35" || CPU == "exynos-m1" ||
- CPU == "kryo" || CPU == "cortex-a73" || CPU == "vulcan") {
- Features.push_back("+neon");
- Features.push_back("+crc");
- Features.push_back("+crypto");
- } else if (CPU == "cyclone") {
- Features.push_back("+neon");
- Features.push_back("+crypto");
- } else if (CPU == "generic") {
+
+ if (CPU == "generic") {
Features.push_back("+neon");
} else {
- return false;
- }
+ unsigned ArchKind = llvm::AArch64::parseCPUArch(CPU);
+ if (!llvm::AArch64::getArchFeatures(ArchKind, Features))
+ return false;
+
+ unsigned Extension = llvm::AArch64::getDefaultExtensions(CPU, ArchKind);
+ if (!llvm::AArch64::getExtensionFeatures(Extension, Features))
+ return false;
+ }
if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features))
return false;
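
A worked example of the new decoding: for -mcpu=cortex-a57+nocrypto, Split.first is "cortex-a57", so parseCPUArch, getArchFeatures, and getDefaultExtensions contribute that core's baseline features (which include +neon, +crc, and +crypto), after which DecodeAArch64Features appends -crypto for the "nocrypto" modifier. The later -crypto entry then wins in the last-occurrence pass over Features in getTargetFeatures.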
@@ -2406,21 +2573,14 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
static bool
getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
std::string MarchLowerCase = March.lower();
std::pair<StringRef, StringRef> Split = StringRef(MarchLowerCase).split("+");
- if (Split.first == "armv8-a" || Split.first == "armv8a") {
- // ok, no additional features.
- } else if (Split.first == "armv8.1-a" || Split.first == "armv8.1a") {
- Features.push_back("+v8.1a");
- } else if (Split.first == "armv8.2-a" || Split.first == "armv8.2a" ) {
- Features.push_back("+v8.2a");
- } else {
- return false;
- }
-
- if (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features))
+ unsigned ArchKind = llvm::AArch64::parseArch(Split.first);
+ if (ArchKind == static_cast<unsigned>(llvm::AArch64::ArchKind::AK_INVALID) ||
+ !llvm::AArch64::getArchFeatures(ArchKind, Features) ||
+ (Split.second.size() && !DecodeAArch64Features(D, Split.second, Features)))
return false;
return true;
@@ -2429,7 +2589,7 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
static bool
getAArch64ArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
StringRef CPU;
std::string McpuLowerCase = Mcpu.lower();
if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, Features))
@@ -2441,7 +2601,7 @@ getAArch64ArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
static bool
getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
std::string MtuneLowerCase = Mtune.lower();
  // Handle the case where the CPU name is 'native'.
if (MtuneLowerCase == "native")
@@ -2456,9 +2616,9 @@ getAArch64MicroArchFeaturesFromMtune(const Driver &D, StringRef Mtune,
static bool
getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
StringRef CPU;
- std::vector<const char *> DecodedFeature;
+ std::vector<StringRef> DecodedFeature;
std::string McpuLowerCase = Mcpu.lower();
if (!DecodeAArch64Mcpu(D, McpuLowerCase, CPU, DecodedFeature))
return false;
@@ -2467,7 +2627,7 @@ getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
}
static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
Arg *A;
bool success = true;
// Enable NEON by default.
@@ -2477,8 +2637,8 @@ static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
else if ((A = Args.getLastArg(options::OPT_mcpu_EQ)))
success = getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
else if (Args.hasArg(options::OPT_arch))
- success = getAArch64ArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args), Args,
- Features);
+ success = getAArch64ArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args, A),
+ Args, Features);
if (success && (A = Args.getLastArg(options::OPT_mtune_EQ)))
success =
@@ -2486,9 +2646,9 @@ static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
else if (success && (A = Args.getLastArg(options::OPT_mcpu_EQ)))
success =
getAArch64MicroArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
- else if (Args.hasArg(options::OPT_arch))
- success = getAArch64MicroArchFeaturesFromMcpu(D, getAArch64TargetCPU(Args),
- Args, Features);
+ else if (success && Args.hasArg(options::OPT_arch))
+ success = getAArch64MicroArchFeaturesFromMcpu(
+ D, getAArch64TargetCPU(Args, A), Args, Features);
if (!success)
D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
@@ -2517,38 +2677,27 @@ static void getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
}
static void getHexagonTargetFeatures(const ArgList &Args,
- std::vector<const char *> &Features) {
- bool HasHVX = false, HasHVXD = false;
-
- // FIXME: This should be able to use handleTargetFeaturesGroup except it is
- // doing dependent option handling here rather than in initFeatureMap or a
- // similar handler.
- for (auto &A : Args) {
- auto &Opt = A->getOption();
- if (Opt.matches(options::OPT_mhexagon_hvx))
- HasHVX = true;
- else if (Opt.matches(options::OPT_mno_hexagon_hvx))
- HasHVXD = HasHVX = false;
- else if (Opt.matches(options::OPT_mhexagon_hvx_double))
- HasHVXD = HasHVX = true;
- else if (Opt.matches(options::OPT_mno_hexagon_hvx_double))
- HasHVXD = false;
- else
- continue;
- A->claim();
+ std::vector<StringRef> &Features) {
+ handleTargetFeaturesGroup(Args, Features,
+ options::OPT_m_hexagon_Features_Group);
+
+ bool UseLongCalls = false;
+ if (Arg *A = Args.getLastArg(options::OPT_mlong_calls,
+ options::OPT_mno_long_calls)) {
+ if (A->getOption().matches(options::OPT_mlong_calls))
+ UseLongCalls = true;
}
- Features.push_back(HasHVX ? "+hvx" : "-hvx");
- Features.push_back(HasHVXD ? "+hvx-double" : "-hvx-double");
+ Features.push_back(UseLongCalls ? "+long-calls" : "-long-calls");
}
static void getWebAssemblyTargetFeatures(const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
handleTargetFeaturesGroup(Args, Features, options::OPT_m_wasm_Features_Group);
}
static void getAMDGPUTargetFeatures(const Driver &D, const ArgList &Args,
- std::vector<const char *> &Features) {
+ std::vector<StringRef> &Features) {
if (const Arg *dAbi = Args.getLastArg(options::OPT_mamdgpu_debugger_abi)) {
StringRef value = dAbi->getValue();
if (value == "1.0") {
@@ -2568,7 +2717,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
const ArgList &Args, ArgStringList &CmdArgs,
bool ForAS) {
const Driver &D = TC.getDriver();
- std::vector<const char *> Features;
+ std::vector<StringRef> Features;
switch (Triple.getArch()) {
default:
break;
@@ -2583,7 +2732,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- getARMTargetFeatures(TC, Triple, Args, Features, ForAS);
+ getARMTargetFeatures(TC, Triple, Args, CmdArgs, Features, ForAS);
break;
case llvm::Triple::ppc:
@@ -2608,7 +2757,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
case llvm::Triple::wasm32:
case llvm::Triple::wasm64:
getWebAssemblyTargetFeatures(Args, Features);
- break;
+ break;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
case llvm::Triple::sparcv9:
@@ -2623,22 +2772,22 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
// Find the last of each feature.
llvm::StringMap<unsigned> LastOpt;
for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- const char *Name = Features[I];
+ StringRef Name = Features[I];
assert(Name[0] == '-' || Name[0] == '+');
- LastOpt[Name + 1] = I;
+ LastOpt[Name.drop_front(1)] = I;
}
for (unsigned I = 0, N = Features.size(); I < N; ++I) {
// If this feature was overridden, ignore it.
- const char *Name = Features[I];
- llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name + 1);
+ StringRef Name = Features[I];
+    llvm::StringMap<unsigned>::iterator LastI =
+        LastOpt.find(Name.drop_front(1));
assert(LastI != LastOpt.end());
unsigned Last = LastI->second;
if (Last != I)
continue;
CmdArgs.push_back("-target-feature");
- CmdArgs.push_back(Name);
+ CmdArgs.push_back(Name.data());
}
}
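
The pass above keeps only the last occurrence of each feature name, keying on the name with the leading '+' or '-' dropped. A minimal worked example:

    // Features = {"+neon", "+crc", "-neon"}
    // LastOpt:    "neon" -> 2, "crc" -> 1
    // Emitted:    -target-feature +crc, -target-feature -neon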
@@ -2864,6 +3013,27 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
DefaultIncrementalLinkerCompatible))
CmdArgs.push_back("-mincremental-linker-compatible");
+ switch (C.getDefaultToolChain().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ if (Arg *A = Args.getLastArg(options::OPT_mimplicit_it_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "always" || Value == "never" || Value == "arm" ||
+ Value == "thumb") {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-arm-implicit-it=" + Value));
+ } else {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
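
For the ARM targets above, the integrated assembler now honours -mimplicit-it=; illustrative invocations (diagnostic wording abridged):

    clang -target armv7a-linux-gnueabihf -mimplicit-it=always -c f.s
    # forwards: -mllvm -arm-implicit-it=always

    clang -target armv7a-linux-gnueabihf -mimplicit-it=sometimes -c f.s
    # error: unsupported argument 'sometimes' to option 'mimplicit-it='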
// When passing -I arguments to the assembler we sometimes need to
// unconditionally take the next argument. For example, when parsing
// '-Wa,-I -Wa,foo' we need to accept the -Wa,foo arg after seeing the
@@ -2976,6 +3146,27 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
} else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
Value.startswith("-mhwdiv") || Value.startswith("-march")) {
// Do nothing, we'll validate it later.
+ } else if (Value == "-defsym") {
+ if (A->getNumValues() != 2) {
+ D.Diag(diag::err_drv_defsym_invalid_format) << Value;
+ break;
+ }
+ const char *S = A->getValue(1);
+ auto Pair = StringRef(S).split('=');
+ auto Sym = Pair.first;
+ auto SVal = Pair.second;
+
+ if (Sym.empty() || SVal.empty()) {
+ D.Diag(diag::err_drv_defsym_invalid_format) << S;
+ break;
+ }
+ int64_t IVal;
+ if (SVal.getAsInteger(0, IVal)) {
+ D.Diag(diag::err_drv_defsym_invalid_symval) << SVal;
+ break;
+ }
+ CmdArgs.push_back(Value.data());
+ TakeNextArg = true;
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
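
The accepted shape is -Wa,-defsym,SYM=VALUE, where VALUE must parse as an integer (getAsInteger(0, ...) auto-detects the radix); illustrative invocations:

    clang -c f.s -Wa,-defsym,FOO=42    # defines assembler symbol FOO = 42
    clang -c f.s -Wa,-defsym,FOO=0x2a  # hex also accepted under radix 0
    clang -c f.s -Wa,-defsym,FOO=bar   # error: invalid symbol value 'bar'
    clang -c f.s -Wa,-defsym           # error: invalid -defsym format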
@@ -3004,72 +3195,23 @@ static void addClangRT(const ToolChain &TC, const ArgList &Args,
CmdArgs.push_back(TC.getCompilerRTArgString(Args, "builtins"));
}
-namespace {
-enum OpenMPRuntimeKind {
- /// An unknown OpenMP runtime. We can't generate effective OpenMP code
- /// without knowing what runtime to target.
- OMPRT_Unknown,
-
- /// The LLVM OpenMP runtime. When completed and integrated, this will become
- /// the default for Clang.
- OMPRT_OMP,
-
- /// The GNU OpenMP runtime. Clang doesn't support generating OpenMP code for
- /// this runtime but can swallow the pragmas, and find and link against the
- /// runtime library itself.
- OMPRT_GOMP,
-
- /// The legacy name for the LLVM OpenMP runtime from when it was the Intel
- /// OpenMP runtime. We support this mode for users with existing dependencies
- /// on this runtime library name.
- OMPRT_IOMP5
-};
-}
-
-/// Compute the desired OpenMP runtime from the flag provided.
-static OpenMPRuntimeKind getOpenMPRuntime(const ToolChain &TC,
- const ArgList &Args) {
- StringRef RuntimeName(CLANG_DEFAULT_OPENMP_RUNTIME);
-
- const Arg *A = Args.getLastArg(options::OPT_fopenmp_EQ);
- if (A)
- RuntimeName = A->getValue();
-
- auto RT = llvm::StringSwitch<OpenMPRuntimeKind>(RuntimeName)
- .Case("libomp", OMPRT_OMP)
- .Case("libgomp", OMPRT_GOMP)
- .Case("libiomp5", OMPRT_IOMP5)
- .Default(OMPRT_Unknown);
-
- if (RT == OMPRT_Unknown) {
- if (A)
- TC.getDriver().Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << A->getValue();
- else
- // FIXME: We could use a nicer diagnostic here.
- TC.getDriver().Diag(diag::err_drv_unsupported_opt) << "-fopenmp";
- }
-
- return RT;
-}
-
static void addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
const ArgList &Args) {
if (!Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
options::OPT_fno_openmp, false))
return;
- switch (getOpenMPRuntime(TC, Args)) {
- case OMPRT_OMP:
+ switch (TC.getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
CmdArgs.push_back("-lomp");
break;
- case OMPRT_GOMP:
+ case Driver::OMPRT_GOMP:
CmdArgs.push_back("-lgomp");
break;
- case OMPRT_IOMP5:
+ case Driver::OMPRT_IOMP5:
CmdArgs.push_back("-liomp5");
break;
- case OMPRT_Unknown:
+ case Driver::OMPRT_Unknown:
// Already diagnosed.
break;
}
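
The runtime mapping is unchanged even though the enum and its parsing moved onto Driver; at the command line:

    clang -fopenmp foo.c          # default runtime, typically -lomp
    clang -fopenmp=libgomp foo.c  # links -lgomp
    clang -fopenmp=libfoo foo.c   # error, diagnosed once inside
                                  # Driver::getOpenMPRuntime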
@@ -3103,11 +3245,15 @@ static void linkSanitizerRuntimeDeps(const ToolChain &TC,
// Force linking against the system libraries sanitizers depends on
// (see PR15823 why this is necessary).
CmdArgs.push_back("--no-as-needed");
- CmdArgs.push_back("-lpthread");
- CmdArgs.push_back("-lrt");
+ // There's no libpthread or librt on RTEMS.
+ if (TC.getTriple().getOS() != llvm::Triple::RTEMS) {
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lrt");
+ }
CmdArgs.push_back("-lm");
- // There's no libdl on FreeBSD.
- if (TC.getTriple().getOS() != llvm::Triple::FreeBSD)
+ // There's no libdl on FreeBSD or RTEMS.
+ if (TC.getTriple().getOS() != llvm::Triple::FreeBSD &&
+ TC.getTriple().getOS() != llvm::Triple::RTEMS)
CmdArgs.push_back("-ldl");
}
@@ -3207,6 +3353,11 @@ static bool addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
// to be dynamic to be sure we export sanitizer interface functions.
if (AddExportDynamic)
CmdArgs.push_back("-export-dynamic");
+
+ const SanitizerArgs &SanArgs = TC.getSanitizerArgs();
+ if (SanArgs.hasCrossDsoCfi() && !AddExportDynamic)
+ CmdArgs.push_back("-export-dynamic-symbol=__cfi_check");
+
return !StaticRuntimes.empty();
}
@@ -3245,8 +3396,20 @@ static bool areOptimizationsEnabled(const ArgList &Args) {
return false;
}
-static bool shouldUseFramePointerForTarget(const ArgList &Args,
- const llvm::Triple &Triple) {
+static bool mustUseFramePointerForTarget(const llvm::Triple &Triple) {
+ switch (Triple.getArch()) {
+ default:
+ return false;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ // ARM Darwin targets require a frame pointer to always be present to aid
+ // offline debugging via backtraces.
+ return Triple.isOSDarwin();
+ }
+}
+
+static bool useFramePointerForTargetByDefault(const ArgList &Args,
+ const llvm::Triple &Triple) {
switch (Triple.getArch()) {
case llvm::Triple::xcore:
case llvm::Triple::wasm32:
@@ -3298,25 +3461,29 @@ static bool shouldUseFramePointer(const ArgList &Args,
const llvm::Triple &Triple) {
if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
options::OPT_fomit_frame_pointer))
- return A->getOption().matches(options::OPT_fno_omit_frame_pointer);
+ return A->getOption().matches(options::OPT_fno_omit_frame_pointer) ||
+ mustUseFramePointerForTarget(Triple);
+
if (Args.hasArg(options::OPT_pg))
return true;
- return shouldUseFramePointerForTarget(Args, Triple);
+ return useFramePointerForTargetByDefault(Args, Triple);
}
static bool shouldUseLeafFramePointer(const ArgList &Args,
const llvm::Triple &Triple) {
if (Arg *A = Args.getLastArg(options::OPT_mno_omit_leaf_frame_pointer,
options::OPT_momit_leaf_frame_pointer))
- return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer);
+ return A->getOption().matches(options::OPT_mno_omit_leaf_frame_pointer) ||
+ mustUseFramePointerForTarget(Triple);
+
if (Args.hasArg(options::OPT_pg))
return true;
if (Triple.isPS4CPU())
return false;
- return shouldUseFramePointerForTarget(Args, Triple);
+ return useFramePointerForTargetByDefault(Args, Triple);
}
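Net effect of the split above: on ARM and Thumb Darwin targets, even an explicit -fomit-frame-pointer or -momit-leaf-frame-pointer is overridden by mustUseFramePointerForTarget, since offline backtrace collection needs the frame chain; on all other targets the user's flag wins exactly as before.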
/// Add a CC1 option to specify the debug compilation directory.
@@ -3416,19 +3583,6 @@ static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
CmdArgs.push_back(types::getTypeName(Input.getType()));
}
-static VersionTuple getMSCompatibilityVersion(unsigned Version) {
- if (Version < 100)
- return VersionTuple(Version);
-
- if (Version < 10000)
- return VersionTuple(Version / 100, Version % 100);
-
- unsigned Build = 0, Factor = 1;
- for (; Version > 10000; Version = Version / 10, Factor = Factor * 10)
- Build = Build + (Version % 10) * Factor;
- return VersionTuple(Version / 100, Version % 100, Build);
-}
-
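As a worked example of the deleted helper (its logic presumably moves alongside the new ToolChain::computeMSVCVersion used later in this patch): -fmsc-version=1800 maps to 18.0, and -fmsc-version=180030723 maps to 18.0.30723, with the loop peeling trailing build digits (Build grows 3, 23, 723, 723, 30723) until Version shrinks to 1800, which then supplies major.minor.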
// Claim options we don't want to warn if they are unused. We do this for
// options that build systems might add but are unused when assembling or only
// running the preprocessor for example.
@@ -3472,58 +3626,17 @@ static void appendUserToPath(SmallVectorImpl<char> &Result) {
Result.append(UID.begin(), UID.end());
}
-VersionTuple visualstudio::getMSVCVersion(const Driver *D, const ToolChain &TC,
- const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args,
- bool IsWindowsMSVC) {
- if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
- IsWindowsMSVC) ||
- Args.hasArg(options::OPT_fmsc_version) ||
- Args.hasArg(options::OPT_fms_compatibility_version)) {
- const Arg *MSCVersion = Args.getLastArg(options::OPT_fmsc_version);
- const Arg *MSCompatibilityVersion =
- Args.getLastArg(options::OPT_fms_compatibility_version);
-
- if (MSCVersion && MSCompatibilityVersion) {
- if (D)
- D->Diag(diag::err_drv_argument_not_allowed_with)
- << MSCVersion->getAsString(Args)
- << MSCompatibilityVersion->getAsString(Args);
- return VersionTuple();
- }
-
- if (MSCompatibilityVersion) {
- VersionTuple MSVT;
- if (MSVT.tryParse(MSCompatibilityVersion->getValue()) && D)
- D->Diag(diag::err_drv_invalid_value)
- << MSCompatibilityVersion->getAsString(Args)
- << MSCompatibilityVersion->getValue();
- return MSVT;
- }
-
- if (MSCVersion) {
- unsigned Version = 0;
- if (StringRef(MSCVersion->getValue()).getAsInteger(10, Version) && D)
- D->Diag(diag::err_drv_invalid_value) << MSCVersion->getAsString(Args)
- << MSCVersion->getValue();
- return getMSCompatibilityVersion(Version);
- }
-
- unsigned Major, Minor, Micro;
- Triple.getEnvironmentVersion(Major, Minor, Micro);
- if (Major || Minor || Micro)
- return VersionTuple(Major, Minor, Micro);
+static Arg *getLastProfileUseArg(const ArgList &Args) {
+ auto *ProfileUseArg = Args.getLastArg(
+ options::OPT_fprofile_instr_use, options::OPT_fprofile_instr_use_EQ,
+ options::OPT_fprofile_use, options::OPT_fprofile_use_EQ,
+ options::OPT_fno_profile_instr_use);
- if (IsWindowsMSVC) {
- VersionTuple MSVT = TC.getMSVCVersionFromExe();
- if (!MSVT.empty())
- return MSVT;
+ if (ProfileUseArg &&
+ ProfileUseArg->getOption().matches(options::OPT_fno_profile_instr_use))
+ ProfileUseArg = nullptr;
- // FIXME: Consider bumping this to 19 (MSVC2015) soon.
- return VersionTuple(18);
- }
- }
- return VersionTuple();
+ return ProfileUseArg;
}
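Because getLastArg keeps only the final occurrence across all five spellings, '-fprofile-instr-use=a.profdata -fno-profile-instr-use' yields a null result here, while the reverse order leaves profile use enabled. The helper is also reused by the Darwin linker further down to decide whether to pass -lto-pass-remarks-with-hotness.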
static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
@@ -3550,13 +3663,7 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
D.Diag(diag::err_drv_argument_not_allowed_with)
<< PGOGenerateArg->getSpelling() << ProfileGenerateArg->getSpelling();
- auto *ProfileUseArg = Args.getLastArg(
- options::OPT_fprofile_instr_use, options::OPT_fprofile_instr_use_EQ,
- options::OPT_fprofile_use, options::OPT_fprofile_use_EQ,
- options::OPT_fno_profile_instr_use);
- if (ProfileUseArg &&
- ProfileUseArg->getOption().matches(options::OPT_fno_profile_instr_use))
- ProfileUseArg = nullptr;
+ auto *ProfileUseArg = getLastProfileUseArg(Args);
if (PGOGenerateArg && ProfileUseArg)
D.Diag(diag::err_drv_argument_not_allowed_with)
@@ -3580,7 +3687,7 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
if (PGOGenerateArg->getOption().matches(
options::OPT_fprofile_generate_EQ)) {
SmallString<128> Path(PGOGenerateArg->getValue());
- llvm::sys::path::append(Path, "default.profraw");
+ llvm::sys::path::append(Path, "default_%m.profraw");
CmdArgs.push_back(
Args.MakeArgString(Twine("-fprofile-instrument-path=") + Path));
}
@@ -3625,13 +3732,13 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
if (C.getArgs().hasArg(options::OPT_c) ||
C.getArgs().hasArg(options::OPT_S)) {
if (Output.isFilename()) {
- CmdArgs.push_back("-coverage-file");
- SmallString<128> CoverageFilename;
- if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o)) {
- CoverageFilename = FinalOutput->getValue();
- } else {
- CoverageFilename = llvm::sys::path::filename(Output.getBaseInput());
- }
+ CmdArgs.push_back("-coverage-notes-file");
+ SmallString<128> OutputFilename;
+ if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
+ OutputFilename = FinalOutput->getValue();
+ else
+ OutputFilename = llvm::sys::path::filename(Output.getBaseInput());
+ SmallString<128> CoverageFilename = OutputFilename;
if (llvm::sys::path::is_relative(CoverageFilename)) {
SmallString<128> Pwd;
if (!llvm::sys::fs::current_path(Pwd)) {
@@ -3639,7 +3746,23 @@ static void addPGOAndCoverageFlags(Compilation &C, const Driver &D,
CoverageFilename.swap(Pwd);
}
}
+ llvm::sys::path::replace_extension(CoverageFilename, "gcno");
CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
+
+ // Leave -fprofile-dir= an unused argument unless .gcda emission is
+ // enabled. To be polite, with '-fprofile-arcs -fno-profile-arcs' consider
+ // the flag used. There is no -fno-profile-dir, so the user has no
+ // targeted way to suppress the warning.
+ if (Args.hasArg(options::OPT_fprofile_arcs) ||
+ Args.hasArg(options::OPT_coverage)) {
+ CmdArgs.push_back("-coverage-data-file");
+ if (Arg *FProfileDir = Args.getLastArg(options::OPT_fprofile_dir)) {
+ CoverageFilename = FProfileDir->getValue();
+ llvm::sys::path::append(CoverageFilename, OutputFilename);
+ }
+ llvm::sys::path::replace_extension(CoverageFilename, "gcda");
+ CmdArgs.push_back(Args.MakeArgString(CoverageFilename));
+ }
}
}
}
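An illustrative example (paths invented, and assuming the coverage gating earlier in this function): 'clang -c foo.c -o out/foo.o --coverage -fprofile-dir=/data' now emits '-coverage-notes-file <cwd>/out/foo.gcno' plus '-coverage-data-file /data/out/foo.gcda'; without -fprofile-dir, both files land next to the object as <cwd>/out/foo.gcno and <cwd>/out/foo.gcda.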
@@ -3666,15 +3789,14 @@ static void addPS4ProfileRTArgs(const ToolChain &TC, const ArgList &Args,
/// this compile should be using PIC mode or not. Returns a tuple of
/// (RelocationModel, PICLevel, IsPIE).
static std::tuple<llvm::Reloc::Model, unsigned, bool>
-ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
- const ArgList &Args) {
- // FIXME: why does this code...and so much everywhere else, use both
- // ToolChain.getTriple() and Triple?
+ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
+ const llvm::Triple &EffectiveTriple = ToolChain.getEffectiveTriple();
+ const llvm::Triple &Triple = ToolChain.getTriple();
+
bool PIE = ToolChain.isPIEDefault();
bool PIC = PIE || ToolChain.isPICDefault();
// The Darwin/MachO default to use PIC does not apply when using -static.
- if (ToolChain.getTriple().isOSBinFormatMachO() &&
- Args.hasArg(options::OPT_static))
+ if (Triple.isOSBinFormatMachO() && Args.hasArg(options::OPT_static))
PIE = PIC = false;
bool IsPICLevelTwo = PIC;
@@ -3682,8 +3804,8 @@ ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
// Android-specific defaults for PIC/PIE
- if (ToolChain.getTriple().isAndroid()) {
- switch (ToolChain.getArch()) {
+ if (Triple.isAndroid()) {
+ switch (Triple.getArch()) {
case llvm::Triple::arm:
case llvm::Triple::armeb:
case llvm::Triple::thumb:
@@ -3708,7 +3830,7 @@ ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
}
// OpenBSD-specific defaults for PIE
- if (ToolChain.getTriple().getOS() == llvm::Triple::OpenBSD) {
+ if (Triple.getOS() == llvm::Triple::OpenBSD) {
switch (ToolChain.getArch()) {
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
@@ -3737,6 +3859,17 @@ ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
options::OPT_fpic, options::OPT_fno_pic,
options::OPT_fPIE, options::OPT_fno_PIE,
options::OPT_fpie, options::OPT_fno_pie);
+ if (Triple.isOSWindows() && LastPICArg &&
+ LastPICArg ==
+ Args.getLastArg(options::OPT_fPIC, options::OPT_fpic,
+ options::OPT_fPIE, options::OPT_fpie)) {
+ ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << LastPICArg->getSpelling() << Triple.str();
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ return std::make_tuple(llvm::Reloc::PIC_, 2U, false);
+ return std::make_tuple(llvm::Reloc::Static, 0U, false);
+ }
+
// Check whether the tool chain trumps the PIC-ness decision. If the PIC-ness
// is forced, then neither PIC nor PIE flags will have any effect.
if (!ToolChain.isPICDefaultForced()) {
@@ -3751,7 +3884,7 @@ ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
O.matches(options::OPT_fPIE) || O.matches(options::OPT_fPIC);
} else {
PIE = PIC = false;
- if (Triple.isPS4CPU()) {
+ if (EffectiveTriple.isPS4CPU()) {
Arg *ModelArg = Args.getLastArg(options::OPT_mcmodel_EQ);
StringRef Model = ModelArg ? ModelArg->getValue() : "";
if (Model != "kernel") {
@@ -3767,21 +3900,22 @@ ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
// Introduce a Darwin and PS4-specific hack. If the default is PIC, but the
// PIC level would've been set to level 1, force it back to level 2 PIC
// instead.
- if (PIC && (ToolChain.getTriple().isOSDarwin() || Triple.isPS4CPU()))
+ if (PIC && (Triple.isOSDarwin() || EffectiveTriple.isPS4CPU()))
IsPICLevelTwo |= ToolChain.isPICDefault();
// These kernel flags are a trump card: they will disable PIC/PIE
// generation, independent of the argument order.
- if (KernelOrKext && ((!Triple.isiOS() || Triple.isOSVersionLT(6)) &&
- !Triple.isWatchOS()))
+ if (KernelOrKext &&
+ ((!EffectiveTriple.isiOS() || EffectiveTriple.isOSVersionLT(6)) &&
+ !EffectiveTriple.isWatchOS()))
PIC = PIE = false;
if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
// This is a very special mode. It trumps the other modes, almost no one
// uses it, and it isn't even valid on any OS but Darwin.
- if (!ToolChain.getTriple().isOSDarwin())
+ if (!Triple.isOSDarwin())
ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
- << A->getSpelling() << ToolChain.getTriple().str();
+ << A->getSpelling() << Triple.str();
// FIXME: Warn when this flag trumps some other PIC or PIE flag.
@@ -3790,13 +3924,54 @@ ParsePICArgs(const ToolChain &ToolChain, const llvm::Triple &Triple,
// match that of llvm-gcc and Apple GCC before that.
PIC = ToolChain.isPICDefault() && ToolChain.isPICDefaultForced();
- return std::make_tuple(llvm::Reloc::DynamicNoPIC, PIC ? 2 : 0, false);
+ return std::make_tuple(llvm::Reloc::DynamicNoPIC, PIC ? 2U : 0U, false);
}
+ bool EmbeddedPISupported;
+ switch (Triple.getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ EmbeddedPISupported = true;
+ break;
+ default:
+ EmbeddedPISupported = false;
+ break;
+ }
+
+ bool ROPI = false, RWPI = false;
+ Arg *LastROPIArg = Args.getLastArg(options::OPT_fropi, options::OPT_fno_ropi);
+ if (LastROPIArg && LastROPIArg->getOption().matches(options::OPT_fropi)) {
+ if (!EmbeddedPISupported)
+ ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << LastROPIArg->getSpelling() << Triple.str();
+ ROPI = true;
+ }
+ Arg *LastRWPIArg = Args.getLastArg(options::OPT_frwpi, options::OPT_fno_rwpi);
+ if (LastRWPIArg && LastRWPIArg->getOption().matches(options::OPT_frwpi)) {
+ if (!EmbeddedPISupported)
+ ToolChain.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << LastRWPIArg->getSpelling() << Triple.str();
+ RWPI = true;
+ }
+
+ // ROPI and RWPI are not compatible with PIC or PIE.
+ if ((ROPI || RWPI) && (PIC || PIE))
+ ToolChain.getDriver().Diag(diag::err_drv_ropi_rwpi_incompatible_with_pic);
+
if (PIC)
- return std::make_tuple(llvm::Reloc::PIC_, IsPICLevelTwo ? 2 : 1, PIE);
+ return std::make_tuple(llvm::Reloc::PIC_, IsPICLevelTwo ? 2U : 1U, PIE);
+
+ llvm::Reloc::Model RelocM = llvm::Reloc::Static;
+ if (ROPI && RWPI)
+ RelocM = llvm::Reloc::ROPI_RWPI;
+ else if (ROPI)
+ RelocM = llvm::Reloc::ROPI;
+ else if (RWPI)
+ RelocM = llvm::Reloc::RWPI;
- return std::make_tuple(llvm::Reloc::Static, 0, false);
+ return std::make_tuple(RelocM, 0U, false);
}
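Condensed, the tail of ParsePICArgs now reduces the accumulated flags to a single relocation model; a minimal restatement (the helper name is invented):

    #include "llvm/Support/CodeGen.h" // llvm::Reloc::Model

    static llvm::Reloc::Model pickRelocModel(bool PIC, bool ROPI, bool RWPI) {
      if (PIC)
        return llvm::Reloc::PIC_; // PIC level and PIE are carried separately
      if (ROPI && RWPI)
        return llvm::Reloc::ROPI_RWPI;
      if (ROPI)
        return llvm::Reloc::ROPI;
      if (RWPI)
        return llvm::Reloc::RWPI;
      return llvm::Reloc::Static;
    }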
static const char *RelocationModelName(llvm::Reloc::Model Model) {
@@ -3807,6 +3982,12 @@ static const char *RelocationModelName(llvm::Reloc::Model Model) {
return "pic";
case llvm::Reloc::DynamicNoPIC:
return "dynamic-no-pic";
+ case llvm::Reloc::ROPI:
+ return "ropi";
+ case llvm::Reloc::RWPI:
+ return "rwpi";
+ case llvm::Reloc::ROPI_RWPI:
+ return "ropi-rwpi";
}
llvm_unreachable("Unknown Reloc::Model kind");
}
@@ -3816,18 +3997,76 @@ static void AddAssemblerKPIC(const ToolChain &ToolChain, const ArgList &Args,
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
- std::tie(RelocationModel, PICLevel, IsPIE) =
- ParsePICArgs(ToolChain, ToolChain.getTriple(), Args);
+ std::tie(RelocationModel, PICLevel, IsPIE) = ParsePICArgs(ToolChain, Args);
if (RelocationModel != llvm::Reloc::Static)
CmdArgs.push_back("-KPIC");
}
+void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
+ StringRef Target, const InputInfo &Output,
+ const InputInfo &Input, const ArgList &Args) const {
+ // If this is a dry run, do not create the compilation database file.
+ if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH))
+ return;
+
+ using llvm::yaml::escape;
+ const Driver &D = getToolChain().getDriver();
+
+ if (!CompilationDatabase) {
+ std::error_code EC;
+ auto File = llvm::make_unique<llvm::raw_fd_ostream>(Filename, EC, llvm::sys::fs::F_Text);
+ if (EC) {
+ D.Diag(clang::diag::err_drv_compilationdatabase) << Filename
+ << EC.message();
+ return;
+ }
+ CompilationDatabase = std::move(File);
+ }
+ auto &CDB = *CompilationDatabase;
+ SmallString<128> Buf;
+ if (llvm::sys::fs::current_path(Buf))
+ Buf = ".";
+ CDB << "{ \"directory\": \"" << escape(Buf) << "\"";
+ CDB << ", \"file\": \"" << escape(Input.getFilename()) << "\"";
+ CDB << ", \"output\": \"" << escape(Output.getFilename()) << "\"";
+ CDB << ", \"arguments\": [\"" << escape(D.ClangExecutable) << "\"";
+ Buf = "-x";
+ Buf += types::getTypeName(Input.getType());
+ CDB << ", \"" << escape(Buf) << "\"";
+ if (!D.SysRoot.empty() && !Args.hasArg(options::OPT__sysroot_EQ)) {
+ Buf = "--sysroot=";
+ Buf += D.SysRoot;
+ CDB << ", \"" << escape(Buf) << "\"";
+ }
+ CDB << ", \"" << escape(Input.getFilename()) << "\"";
+ for (auto &A: Args) {
+ auto &O = A->getOption();
+ // Skip language selection, which is positional.
+ if (O.getID() == options::OPT_x)
+ continue;
+ // Skip writing dependency output and the compilation database itself.
+ if (O.getGroup().isValid() && O.getGroup().getID() == options::OPT_M_Group)
+ continue;
+ // Skip inputs.
+ if (O.getKind() == Option::InputClass)
+ continue;
+ // All other arguments are quoted and appended.
+ ArgStringList ASL;
+ A->render(Args, ASL);
+ for (auto &it: ASL)
+ CDB << ", \"" << escape(it) << "\"";
+ }
+ Buf = "--target=";
+ Buf += Target;
+ CDB << ", \"" << escape(Buf) << "\"]},\n";
+}
+
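With -MJ <file>, each compile job appends one fragment shaped roughly like the following (paths and triple invented for illustration); note that -x is fused with the type name into a single element, and the trailing comma and newline mean the user still has to wrap the fragments in [ ] to obtain valid JSON:

    { "directory": "/work", "file": "foo.c", "output": "foo.o",
      "arguments": ["/usr/bin/clang", "-xc", "foo.c", "-c", "-o", "foo.o",
                    "--target=x86_64-unknown-linux-gnu"]},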
void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args, const char *LinkingOutput) const {
- std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
- const llvm::Triple Triple(TripleStr);
+ const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
+ const std::string &TripleStr = Triple.getTriple();
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
@@ -3845,10 +4084,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
assert(Inputs.size() >= 1 && "Must have at least one input.");
const InputInfo &Input = Inputs[0];
// CUDA compilation may have multiple inputs (source file + results of
- // device-side compilations). All other jobs are expected to have exactly one
+ // device-side compilations). OpenMP device jobs also take the host IR as a
+ // second input. All other jobs are expected to have exactly one
// input.
bool IsCuda = JA.isOffloading(Action::OFK_Cuda);
- assert((IsCuda || Inputs.size() == 1) && "Unable to handle multiple inputs.");
+ bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
+ assert((IsCuda || (IsOpenMPDevice && Inputs.size() == 2) ||
+ Inputs.size() == 1) &&
+ "Unable to handle multiple inputs.");
// C++ is not supported for IAMCU.
if (IsIAMCU && types::isCXX(Input.getType()))
@@ -3863,6 +4106,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-triple");
CmdArgs.push_back(Args.MakeArgString(TripleStr));
+ if (const Arg *MJ = Args.getLastArg(options::OPT_MJ)) {
+ DumpCompilationDatabase(C, MJ->getValue(), TripleStr, Output, Input, Args);
+ Args.ClaimAllArgs(options::OPT_MJ);
+ }
+
if (IsCuda) {
// We have to pass the triple of the host if compiling for a CUDA device and
// vice-versa.
@@ -3925,6 +4173,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_Nothing)
CmdArgs.push_back("-fsyntax-only");
+ else if (JA.getType() == types::TY_ModuleFile)
+ CmdArgs.push_back("-emit-module-interface");
else if (UsePCH)
CmdArgs.push_back("-emit-pch");
else
@@ -3977,12 +4227,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Embed-bitcode option.
- if (C.getDriver().embedBitcodeEnabled() &&
+ if (C.getDriver().embedBitcodeInObject() &&
(isa<BackendJobAction>(JA) || isa<AssembleJobAction>(JA))) {
// Add flags implied by -fembed-bitcode.
Args.AddLastArg(CmdArgs, options::OPT_fembed_bitcode_EQ);
// Disable all llvm IR level optimizations.
- CmdArgs.push_back("-disable-llvm-optzns");
+ CmdArgs.push_back("-disable-llvm-passes");
}
if (C.getDriver().embedBitcodeMarkerOnly())
CmdArgs.push_back("-fembed-bitcode=marker");
@@ -4022,6 +4272,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
CmdArgs.push_back("-analyzer-checker=core");
+ CmdArgs.push_back("-analyzer-checker=apiModeling");
if (!IsWindowsMSVC) {
CmdArgs.push_back("-analyzer-checker=unix");
@@ -4088,9 +4339,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
unsigned PICLevel;
bool IsPIE;
std::tie(RelocationModel, PICLevel, IsPIE) =
- ParsePICArgs(getToolChain(), Triple, Args);
+ ParsePICArgs(getToolChain(), Args);
const char *RMName = RelocationModelName(RelocationModel);
+
+ if ((RelocationModel == llvm::Reloc::ROPI ||
+ RelocationModel == llvm::Reloc::ROPI_RWPI) &&
+ types::isCXX(Input.getType()) &&
+ !Args.hasArg(options::OPT_fallow_unsupported))
+ D.Diag(diag::err_drv_ropi_incompatible_with_cxx);
+
if (RMName) {
CmdArgs.push_back("-mrelocation-model");
CmdArgs.push_back(RMName);
@@ -4125,9 +4383,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.hasArg(options::OPT_frewrite_map_file_EQ)) {
for (const Arg *A : Args.filtered(options::OPT_frewrite_map_file,
options::OPT_frewrite_map_file_EQ)) {
- CmdArgs.push_back("-frewrite-map-file");
- CmdArgs.push_back(A->getValue());
- A->claim();
+ StringRef Map = A->getValue();
+ if (!llvm::sys::fs::exists(Map)) {
+ D.Diag(diag::err_drv_no_such_file) << Map;
+ } else {
+ CmdArgs.push_back("-frewrite-map-file");
+ CmdArgs.push_back(A->getValue());
+ A->claim();
+ }
}
}
@@ -4142,6 +4405,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
true))
CmdArgs.push_back("-fno-jump-tables");
+ if (!Args.hasFlag(options::OPT_fpreserve_as_comments,
+ options::OPT_fno_preserve_as_comments, true))
+ CmdArgs.push_back("-fno-preserve-as-comments");
+
if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
CmdArgs.push_back("-mregparm");
CmdArgs.push_back(A->getValue());
@@ -4300,6 +4567,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (ReciprocalMath)
CmdArgs.push_back("-freciprocal-math");
+ if (!TrappingMath)
+ CmdArgs.push_back("-fno-trapping-math");
+
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
+ options::OPT_fno_fast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_fdenormal_fp_math_EQ))
+ if (A->getOption().getID() != options::OPT_fno_fast_math &&
+ A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations)
+ Args.AddLastArg(CmdArgs, options::OPT_fdenormal_fp_math_EQ);
+
// Validate and pass through -fp-contract option.
if (Arg *A = Args.getLastArg(options::OPT_ffast_math, FastMathAliasOption,
options::OPT_fno_fast_math,
@@ -4374,6 +4654,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mms-bitfields");
}
+ if (Args.hasFlag(options::OPT_mpie_copy_relocations,
+ options::OPT_mno_pie_copy_relocations,
+ false)) {
+ CmdArgs.push_back("-mpie-copy-relocations");
+ }
+
// This is a coarse approximation of what llvm-gcc actually does, both
// -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
// complicated ways.
@@ -4535,6 +4821,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
: "-");
}
+ bool splitDwarfInlining =
+ Args.hasFlag(options::OPT_fsplit_dwarf_inlining,
+ options::OPT_fno_split_dwarf_inlining, true);
+
Args.ClaimAllArgs(options::OPT_g_Group);
Arg *SplitDwarfArg = Args.getLastArg(options::OPT_gsplit_dwarf);
if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
@@ -4544,9 +4834,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// If you say "-gsplit-dwarf -gline-tables-only", -gsplit-dwarf loses.
// But -gsplit-dwarf is not a g_group option, hence we have to check the
// order explicitly. (If -gsplit-dwarf wins, we fix DebugInfoKind later.)
- if (SplitDwarfArg && DebugInfoKind < codegenoptions::LimitedDebugInfo &&
- A->getIndex() > SplitDwarfArg->getIndex())
- SplitDwarfArg = nullptr;
+ // This gets a bit more complicated if you've disabled inline info in the
+ // skeleton CUs (splitDwarfInlining) - then there's value in composing
+ // split-dwarf and line-tables-only, so let those compose naturally in
+ // that case.
+ // And if you just turned debug info off (-gsplit-dwarf -g0), honor that.
+ if (SplitDwarfArg) {
+ if (A->getIndex() > SplitDwarfArg->getIndex()) {
+ if (DebugInfoKind == codegenoptions::NoDebugInfo ||
+ (DebugInfoKind == codegenoptions::DebugLineTablesOnly &&
+ splitDwarfInlining))
+ SplitDwarfArg = nullptr;
+ } else if (splitDwarfInlining)
+ DebugInfoKind = codegenoptions::NoDebugInfo;
+ }
} else
// For any other 'g' option, use Limited.
DebugInfoKind = codegenoptions::LimitedDebugInfo;
@@ -4581,13 +4882,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// We ignore flags -gstrict-dwarf and -grecord-gcc-switches for now.
Args.ClaimAllArgs(options::OPT_g_flags_Group);
- // PS4 defaults to no column info
+ // Column info is included by default for everything except PS4 and CodeView.
+ // Clang doesn't track end columns, just starting columns, which, in theory,
+ // is fine for CodeView (and PDB). In practice, however, the Microsoft
+ // debuggers don't handle missing end columns well, so it's better not to
+ // include any column info.
if (Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
- /*Default=*/ !IsPS4CPU))
+ /*Default=*/ !IsPS4CPU && !(IsWindowsMSVC && EmitCodeView)))
CmdArgs.push_back("-dwarf-column-info");
// FIXME: Move backend command line options to the module.
- if (Args.hasArg(options::OPT_gmodules)) {
+ // If -gline-tables-only is the last option it wins.
+ if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
+ Args.hasArg(options::OPT_gmodules)) {
DebugInfoKind = codegenoptions::LimitedDebugInfo;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
@@ -4597,7 +4904,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// splitting and extraction.
// FIXME: Currently only works on Linux.
if (getToolChain().getTriple().isOSLinux() && SplitDwarfArg) {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ if (!splitDwarfInlining)
+ CmdArgs.push_back("-fno-split-dwarf-inlining");
+ if (DebugInfoKind == codegenoptions::NoDebugInfo)
+ DebugInfoKind = codegenoptions::LimitedDebugInfo;
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-split-dwarf=Enable");
}
@@ -4635,11 +4945,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-generate-type-units");
}
- // CloudABI and WebAssembly use -ffunction-sections and -fdata-sections by
- // default.
- bool UseSeparateSections = Triple.getOS() == llvm::Triple::CloudABI ||
- Triple.getArch() == llvm::Triple::wasm32 ||
- Triple.getArch() == llvm::Triple::wasm64;
+ bool UseSeparateSections = isUseSeparateSections(Triple);
if (Args.hasFlag(options::OPT_ffunction_sections,
options::OPT_fno_function_sections, UseSeparateSections)) {
@@ -4659,7 +4965,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fxray_instrument,
options::OPT_fnoxray_instrument, false)) {
- CmdArgs.push_back("-fxray-instrument");
+ const char *const XRayInstrumentOption = "-fxray-instrument";
+ if (Triple.getOS() == llvm::Triple::Linux)
+ switch (Triple.getArch()) {
+ case llvm::Triple::x86_64:
+ case llvm::Triple::arm:
+ case llvm::Triple::aarch64:
+ // Supported.
+ break;
+ default:
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on " + Triple.str());
+ }
+ else
+ D.Diag(diag::err_drv_clang_unsupported)
+ << (std::string(XRayInstrumentOption) + " on non-Linux target OS");
+ CmdArgs.push_back(XRayInstrumentOption);
if (const Arg *A =
Args.getLastArg(options::OPT_fxray_instruction_threshold_,
options::OPT_fxray_instruction_threshold_EQ)) {
@@ -4794,6 +5115,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
claimNoWarnArgs(Args);
Args.AddAllArgs(CmdArgs, options::OPT_R_Group);
+
Args.AddAllArgs(CmdArgs, options::OPT_W_Group);
if (Args.hasFlag(options::OPT_pedantic, options::OPT_no_pedantic, false))
CmdArgs.push_back("-pedantic");
@@ -4998,9 +5320,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
// -fhosted is default.
+ bool IsHosted = true;
if (Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false) ||
- KernelOrKext)
+ KernelOrKext) {
CmdArgs.push_back("-ffreestanding");
+ IsHosted = false;
+ }
// Forward -f (flag) options which we can pass directly.
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
@@ -5021,15 +5346,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fno_elide_type);
// Forward flags for OpenMP. We don't do this if the current action is a
- // device offloading action.
- //
- // TODO: Allow OpenMP offload actions when they become available.
+ // device offloading action other than OpenMP.
if (Args.hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
options::OPT_fno_openmp, false) &&
- JA.isDeviceOffloading(Action::OFK_None)) {
- switch (getOpenMPRuntime(getToolChain(), Args)) {
- case OMPRT_OMP:
- case OMPRT_IOMP5:
+ (JA.isDeviceOffloading(Action::OFK_None) ||
+ JA.isDeviceOffloading(Action::OFK_OpenMP))) {
+ switch (getToolChain().getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
+ case Driver::OMPRT_IOMP5:
// Clang can generate useful OpenMP code for these two runtime libraries.
CmdArgs.push_back("-fopenmp");
@@ -5134,6 +5458,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else {
StackProtectorLevel =
getToolChain().GetDefaultStackProtectorLevel(KernelOrKext);
+ // Only use a default stack protector on Darwin if -ffreestanding is
+ // not specified.
+ if (Triple.isOSDarwin() && !IsHosted)
+ StackProtectorLevel = 0;
}
if (StackProtectorLevel) {
CmdArgs.push_back("-stack-protector");
@@ -5239,6 +5567,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.getLastArg(options::OPT_cl_denorms_are_zero)) {
CmdArgs.push_back("-cl-denorms-are-zero");
}
+ if (Args.getLastArg(options::OPT_cl_fp32_correctly_rounded_divide_sqrt)) {
+ CmdArgs.push_back("-cl-fp32-correctly-rounded-divide-sqrt");
+ }
// Forward -f options with positive and negative forms; we translate
// these by hand.
@@ -5294,23 +5625,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fblocks-runtime-optional");
}
+ if (Args.hasFlag(options::OPT_fcoroutines_ts, options::OPT_fno_coroutines_ts,
+ false) &&
+ types::isCXX(InputType)) {
+ CmdArgs.push_back("-fcoroutines-ts");
+ }
+
// -fmodules enables the use of precompiled modules (off by default).
// Users can pass -fno-cxx-modules to turn off modules support for
// C++/Objective-C++ programs.
- bool HaveModules = false;
+ bool HaveClangModules = false;
if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) {
bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
options::OPT_fno_cxx_modules, true);
if (AllowedInCXX || !types::isCXX(InputType)) {
CmdArgs.push_back("-fmodules");
- HaveModules = true;
+ HaveClangModules = true;
}
}
+ bool HaveAnyModules = HaveClangModules;
+ if (Args.hasArg(options::OPT_fmodules_ts)) {
+ CmdArgs.push_back("-fmodules-ts");
+ HaveAnyModules = true;
+ }
+
// -fmodule-maps enables implicit reading of module map files. By default,
- // this is enabled if we are using precompiled modules.
+ // this is enabled if we are using Clang's flavor of precompiled modules.
if (Args.hasFlag(options::OPT_fimplicit_module_maps,
- options::OPT_fno_implicit_module_maps, HaveModules)) {
+ options::OPT_fno_implicit_module_maps, HaveClangModules)) {
CmdArgs.push_back("-fimplicit-module-maps");
}
@@ -5330,9 +5673,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fno-implicit-modules turns off implicitly compiling modules on demand.
if (!Args.hasFlag(options::OPT_fimplicit_modules,
- options::OPT_fno_implicit_modules)) {
- CmdArgs.push_back("-fno-implicit-modules");
- } else if (HaveModules) {
+ options::OPT_fno_implicit_modules, HaveClangModules)) {
+ if (HaveAnyModules)
+ CmdArgs.push_back("-fno-implicit-modules");
+ } else if (HaveAnyModules) {
// -fmodule-cache-path specifies where our implicitly-built module files
// should be written.
SmallString<128> Path;
@@ -5356,6 +5700,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Path));
}
+ if (HaveAnyModules) {
+ // -fprebuilt-module-path specifies where to load the prebuilt module files.
+ for (const Arg *A : Args.filtered(options::OPT_fprebuilt_module_path))
+ CmdArgs.push_back(Args.MakeArgString(
+ std::string("-fprebuilt-module-path=") + A->getValue()));
+ }
+
// -fmodule-name specifies the module that is currently being built (or
// used for header checking by -fmodule-maps).
Args.AddLastArg(CmdArgs, options::OPT_fmodule_name_EQ);
@@ -5364,15 +5715,27 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// definitions.
Args.AddAllArgs(CmdArgs, options::OPT_fmodule_map_file);
+ // -fbuiltin-module-map can be used to load the clang
+ // builtin headers modulemap file.
+ if (Args.hasArg(options::OPT_fbuiltin_module_map)) {
+ SmallString<128> BuiltinModuleMap(getToolChain().getDriver().ResourceDir);
+ llvm::sys::path::append(BuiltinModuleMap, "include");
+ llvm::sys::path::append(BuiltinModuleMap, "module.modulemap");
+ if (llvm::sys::fs::exists(BuiltinModuleMap)) {
+ CmdArgs.push_back(Args.MakeArgString("-fmodule-map-file=" +
+ BuiltinModuleMap));
+ }
+ }
+
// -fmodule-file can be used to specify files containing precompiled modules.
- if (HaveModules)
+ if (HaveAnyModules)
Args.AddAllArgs(CmdArgs, options::OPT_fmodule_file);
else
Args.ClaimAllArgs(options::OPT_fmodule_file);
// When building modules and generating crashdumps, we need to dump a module
// dependency VFS alongside the output.
- if (HaveModules && C.isForDiagnostics()) {
+ if (HaveClangModules && C.isForDiagnostics()) {
SmallString<128> VFSDir(Output.getFilename());
llvm::sys::path::replace_extension(VFSDir, ".cache");
// Add the cache directory as a temp so the crash diagnostics pick it up.
@@ -5383,7 +5746,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(VFSDir));
}
- if (HaveModules)
+ if (HaveClangModules)
Args.AddLastArg(CmdArgs, options::OPT_fmodules_user_build_path);
// Pass through all -fmodules-ignore-macro arguments.
@@ -5401,9 +5764,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
llvm::sys::fs::file_status Status;
if (llvm::sys::fs::status(A->getValue(), Status))
D.Diag(diag::err_drv_no_such_file) << A->getValue();
- CmdArgs.push_back(Args.MakeArgString(
- "-fbuild-session-timestamp=" +
- Twine((uint64_t)Status.getLastModificationTime().toEpochTime())));
+ CmdArgs.push_back(
+ Args.MakeArgString("-fbuild-session-timestamp=" +
+ Twine((uint64_t)Status.getLastModificationTime()
+ .time_since_epoch()
+ .count())));
}
if (Args.getLastArg(options::OPT_fmodules_validate_once_per_build_session)) {
@@ -5416,6 +5781,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.AddLastArg(CmdArgs, options::OPT_fmodules_validate_system_headers);
+ Args.AddLastArg(CmdArgs, options::OPT_fmodules_disable_diagnostic_validation);
// -faccess-control is default.
if (Args.hasFlag(options::OPT_fno_access_control,
@@ -5482,9 +5848,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_ms_extensions, true))))
CmdArgs.push_back("-fms-compatibility");
- // -fms-compatibility-version=18.00 is default.
- VersionTuple MSVT = visualstudio::getMSVCVersion(
- &D, getToolChain(), getToolChain().getTriple(), Args, IsWindowsMSVC);
+ VersionTuple MSVT =
+ getToolChain().computeMSVCVersion(&getToolChain().getDriver(), Args);
if (!MSVT.empty())
CmdArgs.push_back(
Args.MakeArgString("-fms-compatibility-version=" + MSVT.getAsString()));
@@ -5555,10 +5920,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_inline_functions))
InlineArg->render(Args, CmdArgs);
+ Args.AddLastArg(CmdArgs, options::OPT_fexperimental_new_pass_manager,
+ options::OPT_fno_experimental_new_pass_manager);
+
ObjCRuntime objcRuntime = AddObjCRuntimeArgs(Args, CmdArgs, rewriteKind);
// -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
- // legacy is the default. Except for deployment taget of 10.5,
+ // legacy is the default. Except for deployment target of 10.5,
// next runtime is always legacy dispatch and -fno-objc-legacy-dispatch
// gets ignored silently.
if (objcRuntime.isNonFragile()) {
@@ -5620,31 +5988,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (rewriteKind != RK_None)
CmdArgs.push_back("-fno-objc-infer-related-result-type");
- // Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only
- // takes precedence.
- const Arg *GCArg = Args.getLastArg(options::OPT_fobjc_gc_only);
- if (!GCArg)
- GCArg = Args.getLastArg(options::OPT_fobjc_gc);
- if (GCArg) {
- if (ARC) {
- D.Diag(diag::err_drv_objc_gc_arr) << GCArg->getAsString(Args);
- } else if (getToolChain().SupportsObjCGC()) {
- GCArg->render(Args, CmdArgs);
- } else {
- // FIXME: We should move this to a hard error.
- D.Diag(diag::warn_drv_objc_gc_unsupported) << GCArg->getAsString(Args);
- }
- }
-
// Pass down -fobjc-weak or -fno-objc-weak if present.
if (types::isObjC(InputType)) {
auto WeakArg = Args.getLastArg(options::OPT_fobjc_weak,
options::OPT_fno_objc_weak);
if (!WeakArg) {
// nothing to do
- } else if (GCArg) {
- if (WeakArg->getOption().matches(options::OPT_fobjc_weak))
- D.Diag(diag::err_objc_weak_with_gc);
} else if (!objcRuntime.allowsWeak()) {
if (WeakArg->getOption().matches(options::OPT_fobjc_weak))
D.Diag(diag::err_objc_weak_unsupported);
@@ -5671,12 +6020,37 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_assume_sane_operator_new))
CmdArgs.push_back("-fno-assume-sane-operator-new");
+ // -frelaxed-template-template-args is off by default, as it is a severe
+ // breaking change until a corresponding change to template partial ordering
+ // is provided.
+ if (Args.hasFlag(options::OPT_frelaxed_template_template_args,
+ options::OPT_fno_relaxed_template_template_args, false))
+ CmdArgs.push_back("-frelaxed-template-template-args");
+
// -fsized-deallocation is off by default, as it is an ABI-breaking change for
// most platforms.
if (Args.hasFlag(options::OPT_fsized_deallocation,
options::OPT_fno_sized_deallocation, false))
CmdArgs.push_back("-fsized-deallocation");
+ // -faligned-allocation is on by default in C++17 onwards and otherwise off
+ // by default.
+ if (Arg *A = Args.getLastArg(options::OPT_faligned_allocation,
+ options::OPT_fno_aligned_allocation,
+ options::OPT_faligned_new_EQ)) {
+ if (A->getOption().matches(options::OPT_fno_aligned_allocation))
+ CmdArgs.push_back("-fno-aligned-allocation");
+ else
+ CmdArgs.push_back("-faligned-allocation");
+ }
+
+ // The default new alignment can be specified using a dedicated option or via
+ // a GCC-compatible option that also turns on aligned allocation.
+ if (Arg *A = Args.getLastArg(options::OPT_fnew_alignment_EQ,
+ options::OPT_faligned_new_EQ))
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-fnew-alignment=") + A->getValue()));
+
// -fconstant-cfstrings is default, and may be subject to argument translation
// on Darwin.
if (!Args.hasFlag(options::OPT_fconstant_cfstrings,
@@ -5744,7 +6118,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -finput_charset=UTF-8 is default. Reject others
if (Arg *inputCharset = Args.getLastArg(options::OPT_finput_charset_EQ)) {
StringRef value = inputCharset->getValue();
- if (value != "UTF-8")
+ if (!value.equals_lower("utf-8"))
D.Diag(diag::err_drv_invalid_value) << inputCharset->getAsString(Args)
<< value;
}
@@ -5752,7 +6126,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fexec_charset=UTF-8 is default. Reject others
if (Arg *execCharset = Args.getLastArg(options::OPT_fexec_charset_EQ)) {
StringRef value = execCharset->getValue();
- if (value != "UTF-8")
+ if (!value.equals_lower("utf-8"))
D.Diag(diag::err_drv_invalid_value) << execCharset->getAsString(Args)
<< value;
}
@@ -5778,6 +6152,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
+ if (Args.hasFlag(options::OPT_fdiagnostics_show_hotness,
+ options::OPT_fno_diagnostics_show_hotness, false))
+ CmdArgs.push_back("-fdiagnostics-show-hotness");
+
if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_format_EQ)) {
CmdArgs.push_back("-fdiagnostics-format");
CmdArgs.push_back(A->getValue());
@@ -5823,6 +6201,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_show_source_location))
CmdArgs.push_back("-fno-show-source-location");
+ if (Args.hasArg(options::OPT_fdiagnostics_absolute_paths))
+ CmdArgs.push_back("-fdiagnostics-absolute-paths");
+
if (!Args.hasFlag(options::OPT_fshow_column, options::OPT_fno_show_column,
true))
CmdArgs.push_back("-fno-show-column");
@@ -5900,6 +6281,40 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-math-builtin");
}
+ if (Args.hasFlag(options::OPT_fsave_optimization_record,
+ options::OPT_fno_save_optimization_record, false)) {
+ CmdArgs.push_back("-opt-record-file");
+
+ const Arg *A = Args.getLastArg(options::OPT_foptimization_record_file_EQ);
+ if (A) {
+ CmdArgs.push_back(A->getValue());
+ } else {
+ SmallString<128> F;
+ if (Output.isFilename() && (Args.hasArg(options::OPT_c) ||
+ Args.hasArg(options::OPT_S))) {
+ F = Output.getFilename();
+ } else {
+ // Use the input filename.
+ F = llvm::sys::path::stem(Input.getBaseInput());
+
+ // If we're compiling for an offload architecture (i.e. a CUDA device),
+ // we need to make the file name for the device compilation different
+ // from the host compilation.
+ if (!JA.isDeviceOffloading(Action::OFK_None) &&
+ !JA.isDeviceOffloading(Action::OFK_Host)) {
+ llvm::sys::path::replace_extension(F, "");
+ F += Action::GetOffloadingFileNamePrefix(JA.getOffloadingDeviceKind(),
+ Triple.normalize());
+ F += "-";
+ F += JA.getOffloadingArch();
+ }
+ }
+
+ llvm::sys::path::replace_extension(F, "opt.yaml");
+ CmdArgs.push_back(Args.MakeArgString(F));
+ }
+ }
+
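For example, 'clang -c foo.c -fsave-optimization-record' should write foo.opt.yaml next to the object, while a device-offloading compile (e.g. CUDA) gets the offloading prefix and architecture folded into the name, roughly foo-<offload-prefix>-<arch>.opt.yaml, so host and device records don't collide.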
// Default to -fno-builtin-str{cat,cpy} on Darwin for ARM.
//
// FIXME: Now that PR4941 has been fixed this can be enabled.
@@ -5920,7 +6335,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// nice to enable this when doing a crashdump for modules as well.
if (Args.hasFlag(options::OPT_frewrite_includes,
options::OPT_fno_rewrite_includes, false) ||
- (C.isForDiagnostics() && !HaveModules))
+ (C.isForDiagnostics() && !HaveAnyModules))
CmdArgs.push_back("-frewrite-includes");
// Only allow -traditional or -traditional-cpp in preprocessing modes.
@@ -5956,6 +6371,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
+ // Set up statistics file output.
+ if (const Arg *A = Args.getLastArg(options::OPT_save_stats_EQ)) {
+ StringRef SaveStats = A->getValue();
+
+ SmallString<128> StatsFile;
+ bool DoSaveStats = false;
+ if (SaveStats == "obj") {
+ if (Output.isFilename()) {
+ StatsFile.assign(Output.getFilename());
+ llvm::sys::path::remove_filename(StatsFile);
+ }
+ DoSaveStats = true;
+ } else if (SaveStats == "cwd") {
+ DoSaveStats = true;
+ } else {
+ D.Diag(diag::err_drv_invalid_value) << A->getAsString(Args) << SaveStats;
+ }
+
+ if (DoSaveStats) {
+ StringRef BaseName = llvm::sys::path::filename(Input.getBaseInput());
+ llvm::sys::path::append(StatsFile, BaseName);
+ llvm::sys::path::replace_extension(StatsFile, "stats");
+ CmdArgs.push_back(Args.MakeArgString(Twine("-stats-file=") +
+ StatsFile));
+ }
+ }
+
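For instance, 'clang -save-stats=obj -c -o build/foo.o foo.c' should hand cc1 '-stats-file=build/foo.stats', while -save-stats=cwd drops foo.stats into the current directory; any other value is rejected via err_drv_invalid_value.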
// Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
// parser.
Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
@@ -5964,8 +6406,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// We translate this by hand to the -cc1 argument, since nightly test uses
// it and developers have been trained to spell it with -mllvm.
- if (StringRef(A->getValue(0)) == "-disable-llvm-optzns") {
- CmdArgs.push_back("-disable-llvm-optzns");
+ if (StringRef(A->getValue(0)) == "-disable-llvm-passes") {
+ CmdArgs.push_back("-disable-llvm-passes");
} else
A->render(Args, CmdArgs);
}
@@ -5979,7 +6421,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// pristine IR generated by the frontend. Ideally, a new compile action should
// be added so both IR can be captured.
if (C.getDriver().isSaveTempsEnabled() &&
- !C.getDriver().embedBitcodeEnabled() && isa<CompileJobAction>(JA))
+ !C.getDriver().embedBitcodeInObject() && isa<CompileJobAction>(JA))
CmdArgs.push_back("-disable-llvm-passes");
if (Output.getType() == types::TY_Dependencies) {
@@ -6041,6 +6483,36 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(I->getFilename());
}
+ // OpenMP offloading device jobs take the argument -fopenmp-host-ir-file-path
+ // to specify the result of the compile phase on the host, so the meaningful
+ // device declarations can be identified. Also, -fopenmp-is-device is passed
+ // along to tell the frontend that it is generating code for a device, so that
+ // only the relevant declarations are emitted.
+ if (IsOpenMPDevice && Inputs.size() == 2) {
+ CmdArgs.push_back("-fopenmp-is-device");
+ CmdArgs.push_back("-fopenmp-host-ir-file-path");
+ CmdArgs.push_back(Args.MakeArgString(Inputs.back().getFilename()));
+ }
+
+ // For all the host OpenMP offloading compile jobs we need to pass the targets
+ // information using -fopenmp-targets= option.
+ if (isa<CompileJobAction>(JA) && JA.isHostOffloading(Action::OFK_OpenMP)) {
+ SmallString<128> TargetInfo("-fopenmp-targets=");
+
+ Arg *Tgts = Args.getLastArg(options::OPT_fopenmp_targets_EQ);
+ assert(Tgts && Tgts->getNumValues() &&
+ "OpenMP offloading has to have targets specified.");
+ for (unsigned i = 0; i < Tgts->getNumValues(); ++i) {
+ if (i)
+ TargetInfo += ',';
+ // We need to get the string from the triple because it may not be exactly
+ // the same as the one we get directly from the arguments.
+ llvm::Triple T(Tgts->getValue(i));
+ TargetInfo += T.getTriple();
+ }
+ CmdArgs.push_back(Args.MakeArgString(TargetInfo.str()));
+ }
+
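So a host compile with '-fopenmp-targets=nvptx64-nvidia-cuda,x86_64-unknown-linux-gnu' gets the comma-joined list back as a single cc1 flag, with each entry round-tripped through llvm::Triple.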
bool WholeProgramVTables =
Args.hasFlag(options::OPT_fwhole_program_vtables,
options::OPT_fno_whole_program_vtables, false);
@@ -6465,6 +6937,20 @@ void ClangAs::AddMIPSTargetArgs(const ArgList &Args,
CmdArgs.push_back(ABIName.data());
}
+void ClangAs::AddX86TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (Arg *A = Args.getLastArg(options::OPT_masm_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "intel" || Value == "att") {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-x86-asm-syntax=" + Value));
+ } else {
+ getToolChain().getDriver().Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ }
+ }
+}
+
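Concretely, assembling with -masm=intel on an x86 target now forwards '-mllvm -x86-asm-syntax=intel', -masm=att forwards the AT&T equivalent, and any other value is rejected through err_drv_unsupported_option_argument.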
void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args,
@@ -6474,9 +6960,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
assert(Inputs.size() == 1 && "Unexpected number of inputs.");
const InputInfo &Input = Inputs[0];
- std::string TripleStr =
- getToolChain().ComputeEffectiveClangTriple(Args, Input.getType());
- const llvm::Triple Triple(TripleStr);
+ const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
+ const std::string &TripleStr = Triple.getTriple();
// Don't warn about "clang -w -c foo.s"
Args.ClaimAllArgs(options::OPT_w);
@@ -6571,7 +7056,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
unsigned PICLevel;
bool IsPIE;
std::tie(RelocationModel, PICLevel, IsPIE) =
- ParsePICArgs(getToolChain(), Triple, Args);
+ ParsePICArgs(getToolChain(), Args);
const char *RMName = RelocationModelName(RelocationModel);
if (RMName) {
@@ -6612,6 +7097,11 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::mips64el:
AddMIPSTargetArgs(Args, CmdArgs);
break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ AddX86TargetArgs(Args, CmdArgs);
+ break;
}
// Consume all the warning flags. Usually this would be handled more
@@ -6645,6 +7135,134 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
SplitDebugName(Args, Input));
}
+void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const {
+ // The version with only one output is expected to refer to a bundling job.
+ assert(isa<OffloadBundlingJobAction>(JA) && "Expecting bundling job!");
+
+ // The bundling command looks like this:
+ // clang-offload-bundler -type=bc
+ // -targets=host-triple,openmp-triple1,openmp-triple2
+ // -outputs=bundled_file
+ // -inputs=unbundle_file_host,unbundle_file_tgt1,unbundle_file_tgt2
+
+ ArgStringList CmdArgs;
+
+ // Get the type.
+ CmdArgs.push_back(TCArgs.MakeArgString(
+ Twine("-type=") + types::getTypeTempSuffix(Output.getType())));
+
+ assert(JA.getInputs().size() == Inputs.size() &&
+ "Not have inputs for all dependence actions??");
+
+ // Get the targets.
+ SmallString<128> Triples;
+ Triples += "-targets=";
+ for (unsigned I = 0; I < Inputs.size(); ++I) {
+ if (I)
+ Triples += ',';
+
+ Action::OffloadKind CurKind = Action::OFK_Host;
+ const ToolChain *CurTC = &getToolChain();
+ const Action *CurDep = JA.getInputs()[I];
+
+ if (const auto *OA = dyn_cast<OffloadAction>(CurDep)) {
+ OA->doOnEachDependence([&](Action *A, const ToolChain *TC, const char *) {
+ CurKind = A->getOffloadingDeviceKind();
+ CurTC = TC;
+ });
+ }
+ Triples += Action::GetOffloadKindName(CurKind);
+ Triples += '-';
+ Triples += CurTC->getTriple().normalize();
+ }
+ CmdArgs.push_back(TCArgs.MakeArgString(Triples));
+
+ // Get bundled file command.
+ CmdArgs.push_back(
+ TCArgs.MakeArgString(Twine("-outputs=") + Output.getFilename()));
+
+ // Get unbundled files command.
+ SmallString<128> UB;
+ UB += "-inputs=";
+ for (unsigned I = 0; I < Inputs.size(); ++I) {
+ if (I)
+ UB += ',';
+ UB += Inputs[I].getFilename();
+ }
+ CmdArgs.push_back(TCArgs.MakeArgString(UB));
+
+ // All the inputs are encoded as commands.
+ C.addCommand(llvm::make_unique<Command>(
+ JA, *this,
+ TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
+ CmdArgs, None));
+}
+
+void OffloadBundler::ConstructJobMultipleOutputs(
+ Compilation &C, const JobAction &JA, const InputInfoList &Outputs,
+ const InputInfoList &Inputs, const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const {
+ // The version with multiple outputs is expected to refer to an unbundling job.
+ auto &UA = cast<OffloadUnbundlingJobAction>(JA);
+
+ // The unbundling command looks like this:
+ // clang-offload-bundler -type=bc
+ // -targets=host-triple,openmp-triple1,openmp-triple2
+ // -inputs=input_file
+ // -outputs=unbundle_file_host,unbundle_file_tgt1,unbundle_file_tgt2
+ // -unbundle
+
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Expecting to unbundle a single file!");
+ InputInfo Input = Inputs.front();
+
+ // Get the type.
+ CmdArgs.push_back(TCArgs.MakeArgString(
+ Twine("-type=") + types::getTypeTempSuffix(Input.getType())));
+
+ // Get the targets.
+ SmallString<128> Triples;
+ Triples += "-targets=";
+ auto DepInfo = UA.getDependentActionsInfo();
+ for (unsigned I = 0; I < DepInfo.size(); ++I) {
+ if (I)
+ Triples += ',';
+
+ auto &Dep = DepInfo[I];
+ Triples += Action::GetOffloadKindName(Dep.DependentOffloadKind);
+ Triples += '-';
+ Triples += Dep.DependentToolChain->getTriple().normalize();
+ }
+
+ CmdArgs.push_back(TCArgs.MakeArgString(Triples));
+
+ // Get bundled file command.
+ CmdArgs.push_back(
+ TCArgs.MakeArgString(Twine("-inputs=") + Input.getFilename()));
+
+ // Get unbundled files command.
+ SmallString<128> UB;
+ UB += "-outputs=";
+ for (unsigned I = 0; I < Outputs.size(); ++I) {
+ if (I)
+ UB += ',';
+ UB += Outputs[I].getFilename();
+ }
+ CmdArgs.push_back(TCArgs.MakeArgString(UB));
+ CmdArgs.push_back("-unbundle");
+
+ // All the inputs are encoded as commands.
+ C.addCommand(llvm::make_unique<Command>(
+ JA, *this,
+ TCArgs.MakeArgString(getToolChain().GetProgramPath(getShortName())),
+ CmdArgs, None));
+}
+
void GnuTool::anchor() {}
void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
@@ -7022,7 +7640,7 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
{options::OPT_T_Group, options::OPT_e, options::OPT_s,
options::OPT_t, options::OPT_u_Group});
- AddLinkerInputs(HTC, Inputs, Args, CmdArgs);
+ AddLinkerInputs(HTC, Inputs, Args, CmdArgs, JA);
//----------------------------------------------------------------------------
// Libraries
@@ -7081,7 +7699,7 @@ void amdgpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Linker = getToolChain().GetProgramPath(getShortName());
ArgStringList CmdArgs;
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
CmdArgs.push_back("-shared");
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -7144,7 +7762,7 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
}
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX())
@@ -7397,11 +8015,14 @@ llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
const llvm::Triple::ArchType Arch = getArchTypeForMachOArchName(Str);
+ unsigned ArchKind = llvm::ARM::parseArch(Str);
T.setArch(Arch);
if (Str == "x86_64h")
T.setArchName(Str);
- else if (Str == "armv6m" || Str == "armv7m" || Str == "armv7em") {
+ else if (ArchKind == llvm::ARM::AK_ARMV6M ||
+ ArchKind == llvm::ARM::AK_ARMV7M ||
+ ArchKind == llvm::ARM::AK_ARMV7EM) {
T.setOS(llvm::Triple::UnknownOS);
T.setObjectFormat(llvm::Triple::MachO);
}
@@ -7488,9 +8109,9 @@ void cloudabi::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
if (D.isUsingLTO())
- AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin, D);
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX())
@@ -7600,6 +8221,29 @@ bool darwin::Linker::NeedsTempPath(const InputInfoList &Inputs) const {
return false;
}
+/// \brief Pass -no_deduplicate to ld64 under certain conditions:
+///
+/// - Either -O0 or -O1 is explicitly specified
+/// - No -O option is specified *and* this is a compile+link (implicit -O0)
+///
+/// Also do *not* add -no_deduplicate when no -O option is specified and this
+/// is just a link (we can't imply -O0)
+static bool shouldLinkerNotDedup(bool IsLinkerOnlyAction, const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O0))
+ return true;
+ if (A->getOption().matches(options::OPT_O))
+ return llvm::StringSwitch<bool>(A->getValue())
+ .Case("1", true)
+ .Default(false);
+ return false; // OPT_Ofast & OPT_O4
+ }
+
+ if (!IsLinkerOnlyAction) // Implicit -O0 for compile+linker only.
+ return true;
+ return false;
+}
+
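Worked through the call site below (ld64 262 or newer only): an explicit -O0 or -O1 adds -no_deduplicate, and so does a compile+link with no -O flag at all (implicit -O0); -O2, -O3, -Ofast, or a link-only invocation with no -O flag all leave ld64's deduplication enabled.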
void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfoList &Inputs) const {
@@ -7656,6 +8300,10 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
CmdArgs.push_back(C.getArgs().MakeArgString(LibLTOPath));
}
+ // ld64 version 262 and above run the deduplicate pass by default.
+ if (Version[0] >= 262 && shouldLinkerNotDedup(C.getJobs().empty(), Args))
+ CmdArgs.push_back("-no_deduplicate");
+
// Derived from the "link" spec.
Args.AddAllArgs(CmdArgs, options::OPT_static);
if (!Args.hasArg(options::OPT_static))
@@ -7742,9 +8390,9 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
else
CmdArgs.push_back("-no_pie");
}
+
// For embed-bitcode, use -bitcode_bundle in the linker command.
- if (C.getDriver().embedBitcodeEnabled() ||
- C.getDriver().embedBitcodeMarkerOnly()) {
+ if (C.getDriver().embedBitcodeEnabled()) {
// Check if the toolchain supports bitcode build flow.
if (MachOTC.SupportsEmbeddedBitcode())
CmdArgs.push_back("-bitcode_bundle");
@@ -7837,6 +8485,24 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// we follow suit for ease of comparison.
AddLinkArgs(C, Args, CmdArgs, Inputs);
+ // For LTO, pass the name of the optimization record file.
+ if (Args.hasFlag(options::OPT_fsave_optimization_record,
+ options::OPT_fno_save_optimization_record, false)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-lto-pass-remarks-output");
+ CmdArgs.push_back("-mllvm");
+
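+ // Name the remarks file after the linked output, e.g. "foo" -> "foo.opt.yaml".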
+ SmallString<128> F;
+ F = Output.getFilename();
+ F += ".opt.yaml";
+ CmdArgs.push_back(Args.MakeArgString(F));
+
+ if (getLastProfileUseArg(Args)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-lto-pass-remarks-with-hotness");
+ }
+ }
+
// It seems that the 'e' option is completely ignored for dynamic executables
// (the default), and with static executables, the last one wins, as expected.
Args.AddAllArgs(CmdArgs, {options::OPT_d_Flag, options::OPT_s, options::OPT_t,
@@ -7866,7 +8532,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
// Build the input file for -filelist (list of linker input files) in case we
// need it later
for (const auto &II : Inputs) {
@@ -7909,6 +8575,13 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getMachOToolChain().addProfileRTLibs(Args, CmdArgs);
+ if (unsigned Parallelism =
+ getLTOParallelism(Args, getToolChain().getDriver())) {
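+ // e.g. a parallelism of 4 is forwarded to ld64 as "-mllvm -threads=4".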
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-threads=") + llvm::to_string(Parallelism)));
+ }
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (getToolChain().getDriver().CCCIsCXX())
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
@@ -8084,7 +8757,7 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
options::OPT_e, options::OPT_r});
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (getToolChain().getDriver().CCCIsCXX())
@@ -8257,7 +8930,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_e, options::OPT_s, options::OPT_t,
options::OPT_Z_Flag, options::OPT_r});
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX()) {
@@ -8376,7 +9049,7 @@ void bitrig::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs,
{options::OPT_L, options::OPT_T_Group, options::OPT_e});
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
if (D.CCCIsCXX()) {
@@ -8639,10 +9312,10 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_r);
if (D.isUsingLTO())
- AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin, D);
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
addOpenMPRuntime(CmdArgs, ToolChain, Args);
@@ -8849,9 +9522,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
break;
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- arm::appendEBLinkFlags(
- Args, CmdArgs,
- llvm::Triple(getToolChain().ComputeEffectiveClangTriple(Args)));
+ arm::appendEBLinkFlags(Args, CmdArgs, getToolChain().getEffectiveTriple());
CmdArgs.push_back("-m");
switch (getToolChain().getTriple().getEnvironment()) {
case llvm::Triple::EABI:
@@ -8939,7 +9610,7 @@ void netbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
Args.AddAllArgs(CmdArgs, options::OPT_r);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
unsigned Major, Minor, Micro;
getToolChain().getTriple().getOSVersion(Major, Minor, Micro);
@@ -9015,16 +9686,13 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const char *LinkingOutput) const {
claimNoWarnArgs(Args);
- std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
- llvm::Triple Triple = llvm::Triple(TripleStr);
-
ArgStringList CmdArgs;
llvm::Reloc::Model RelocationModel;
unsigned PICLevel;
bool IsPIE;
std::tie(RelocationModel, PICLevel, IsPIE) =
- ParsePICArgs(getToolChain(), Triple, Args);
+ ParsePICArgs(getToolChain(), Args);
switch (getToolChain().getArch()) {
default:
@@ -9107,7 +9775,7 @@ void gnutools::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// march from being picked in the absence of a cpu flag.
Arg *A;
if ((A = Args.getLastArg(options::OPT_mcpu_EQ)) &&
- StringRef(A->getValue()).lower() == "krait")
+ StringRef(A->getValue()).equals_lower("krait"))
CmdArgs.push_back("-mcpu=cortex-a15");
else
Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
@@ -9273,6 +9941,7 @@ static void AddRunTimeLibs(const ToolChain &TC, const Driver &D,
llvm_unreachable("unsupported OS");
case llvm::Triple::Win32:
case llvm::Triple::Linux:
+ case llvm::Triple::Fuchsia:
addClangRT(TC, Args, CmdArgs);
break;
}
@@ -9338,7 +10007,7 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
return "elf32_x86_64";
return "elf_x86_64";
default:
- llvm_unreachable("Unexpected arch");
+ return nullptr;
}
}
@@ -9351,8 +10020,7 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
static_cast<const toolchains::Linux &>(getToolChain());
const Driver &D = ToolChain.getDriver();
- std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
- llvm::Triple Triple = llvm::Triple(TripleStr);
+ const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
const llvm::Triple::ArchType Arch = ToolChain.getArch();
const bool isAndroid = ToolChain.getTriple().isAndroid();
@@ -9397,6 +10065,14 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Arch == llvm::Triple::armeb || Arch == llvm::Triple::thumbeb)
arm::appendEBLinkFlags(Args, CmdArgs, Triple);
+ // Most Android ARM64 targets should enable the linker fix for erratum
+ // 843419. Only non-Cortex-A53 devices are allowed to skip this flag.
+ if (Arch == llvm::Triple::aarch64 && isAndroid) {
+ std::string CPU = getCPUName(Args, Triple);
+ if (CPU.empty() || CPU == "generic" || CPU == "cortex-a53")
+ CmdArgs.push_back("--fix-cortex-a53-843419");
+ }
+
for (const auto &Opt : ToolChain.ExtraOpts)
CmdArgs.push_back(Opt.c_str());
@@ -9404,8 +10080,13 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--eh-frame-hdr");
}
- CmdArgs.push_back("-m");
- CmdArgs.push_back(getLDMOption(ToolChain.getTriple(), Args));
+ if (const char *LDMOption = getLDMOption(ToolChain.getTriple(), Args)) {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back(LDMOption);
+ } else {
+ D.Diag(diag::err_target_unknown_triple) << Triple.str();
+ return;
+ }
if (Args.hasArg(options::OPT_static)) {
if (Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
@@ -9476,14 +10157,14 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
if (D.isUsingLTO())
- AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin);
+ AddGoldPlugin(ToolChain, Args, CmdArgs, D.getLTOMode() == LTOK_Thin, D);
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
// The profile runtime also needs access to system libraries.
getToolChain().addProfileRTLibs(Args, CmdArgs);
@@ -9522,24 +10203,26 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
WantPthread = true;
// Also link the particular OpenMP runtimes.
- switch (getOpenMPRuntime(ToolChain, Args)) {
- case OMPRT_OMP:
+ switch (ToolChain.getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
CmdArgs.push_back("-lomp");
break;
- case OMPRT_GOMP:
+ case Driver::OMPRT_GOMP:
CmdArgs.push_back("-lgomp");
// FIXME: Exclude this for platforms with libgomp that don't require
// librt. Most modern Linux platforms require it, but some may not.
CmdArgs.push_back("-lrt");
break;
- case OMPRT_IOMP5:
+ case Driver::OMPRT_IOMP5:
CmdArgs.push_back("-liomp5");
break;
- case OMPRT_Unknown:
+ case Driver::OMPRT_Unknown:
// Already diagnosed.
break;
}
+ if (JA.isHostOffloading(Action::OFK_OpenMP))
+ CmdArgs.push_back("-lomptarget");
}
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
@@ -9585,6 +10268,9 @@ void gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Add OpenMP offloading linker script args if required.
+ AddOpenMPLinkerScript(getToolChain(), C, Output, Inputs, Args, CmdArgs, JA);
+
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
@@ -9694,7 +10380,7 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (D.CCCIsCXX() &&
!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
@@ -9760,6 +10446,112 @@ void nacltools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
+void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::Fuchsia &ToolChain =
+ static_cast<const toolchains::Fuchsia &>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and for "clang -w foo.o -o foo". Other warning options are already
+ // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
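+ // A generic "lld" binary must be told which flavor to run; Fuchsia links
+ // ELF objects, so select the GNU-compatible driver.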
+ if (llvm::sys::path::stem(Exec).equals_lower("lld")) {
+ CmdArgs.push_back("-flavor");
+ CmdArgs.push_back("gnu");
+ }
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (!Args.hasArg(options::OPT_shared) && !Args.hasArg(options::OPT_r))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ if (Args.hasArg(options::OPT_r))
+ CmdArgs.push_back("-r");
+ else
+ CmdArgs.push_back("--build-id");
+
+ if (!Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--eh-frame-hdr");
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-Bstatic");
+ else if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-shared");
+
+ if (!Args.hasArg(options::OPT_static)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back(Args.MakeArgString(D.DyldPrefix + "ld.so.1"));
+ }
+ }
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("Scrt1.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_u);
+
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-Bdynamic");
+
+ if (D.CCCIsCXX()) {
+ bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
+ CmdArgs.push_back("-lm");
+ }
+
+ AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
+
+ if (Args.hasArg(options::OPT_pthread) ||
+ Args.hasArg(options::OPT_pthreads))
+ CmdArgs.push_back("-lpthread");
+
+ if (Args.hasArg(options::OPT_fsplit_stack))
+ CmdArgs.push_back("--wrap=pthread_create");
+
+ CmdArgs.push_back("-lc");
+ }
+
+ C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
+}
+
void minix::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -9806,7 +10598,7 @@ void minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs,
{options::OPT_L, options::OPT_T_Group, options::OPT_e});
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
getToolChain().addProfileRTLibs(Args, CmdArgs);
@@ -9927,7 +10719,7 @@ void dragonfly::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs,
{options::OPT_L, options::OPT_T_Group, options::OPT_e});
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
CmdArgs.push_back("-L/usr/lib/gcc50");
@@ -10050,14 +10842,14 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string UniversalCRTLibPath;
if (MSVC.getUniversalCRTLibraryPath(UniversalCRTLibPath))
CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
- UniversalCRTLibPath.c_str()));
+ UniversalCRTLibPath));
}
}
std::string WindowsSdkLibPath;
if (MSVC.getWindowsSDKLibraryPath(WindowsSdkLibPath))
- CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
- WindowsSdkLibPath.c_str()));
+ CmdArgs.push_back(
+ Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath));
}
if (!C.getDriver().IsCLMode() && Args.hasArg(options::OPT_L))
@@ -10083,12 +10875,16 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (TC.getSanitizerArgs().needsAsanRt()) {
CmdArgs.push_back(Args.MakeArgString("-debug"));
CmdArgs.push_back(Args.MakeArgString("-incremental:no"));
- if (Args.hasArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd)) {
+ if (TC.getSanitizerArgs().needsSharedAsanRt() ||
+ Args.hasArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd)) {
for (const auto &Lib : {"asan_dynamic", "asan_dynamic_runtime_thunk"})
CmdArgs.push_back(TC.getCompilerRTArgString(Args, Lib));
// Make sure the dynamic runtime thunk is not optimized out at link time
// to ensure proper SEH handling.
- CmdArgs.push_back(Args.MakeArgString("-include:___asan_seh_interceptor"));
+ CmdArgs.push_back(Args.MakeArgString(
+ TC.getArch() == llvm::Triple::x86
+ ? "-include:___asan_seh_interceptor"
+ : "-include:__asan_seh_interceptor"));
} else if (DLL) {
CmdArgs.push_back(TC.getCompilerRTArgString(Args, "asan_dll_thunk"));
} else {
@@ -10105,16 +10901,16 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-nodefaultlib:vcompd.lib");
CmdArgs.push_back(Args.MakeArgString(std::string("-libpath:") +
TC.getDriver().Dir + "/../lib"));
- switch (getOpenMPRuntime(getToolChain(), Args)) {
- case OMPRT_OMP:
+ switch (TC.getDriver().getOpenMPRuntime(Args)) {
+ case Driver::OMPRT_OMP:
CmdArgs.push_back("-defaultlib:libomp.lib");
break;
- case OMPRT_IOMP5:
+ case Driver::OMPRT_IOMP5:
CmdArgs.push_back("-defaultlib:libiomp5md.lib");
break;
- case OMPRT_GOMP:
+ case Driver::OMPRT_GOMP:
break;
- case OMPRT_Unknown:
+ case Driver::OMPRT_Unknown:
// Already diagnosed.
break;
}
@@ -10272,6 +11068,14 @@ std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
options::OPT__SLASH_MT, options::OPT__SLASH_MTd))
A->render(Args, CmdArgs);
+ // Use MSVC's default threadsafe statics behaviour unless there was a flag.
+ if (Arg *A = Args.getLastArg(options::OPT_fthreadsafe_statics,
+ options::OPT_fno_threadsafe_statics)) {
+ CmdArgs.push_back(A->getOption().getID() == options::OPT_fthreadsafe_statics
+ ? "/Zc:threadSafeInit"
+ : "/Zc:threadSafeInit-");
+ }
+
// Pass through all unknown arguments so that the fallback command can see
// them too.
Args.AddAllArgs(CmdArgs, options::OPT_UNKNOWN);
@@ -10454,7 +11258,7 @@ void MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
TC.AddFilePathLibArgs(Args, CmdArgs);
- AddLinkerInputs(TC, Inputs, Args, CmdArgs);
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
// TODO: Add ASan stuff here
@@ -10578,7 +11382,7 @@ void XCore::Linker::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fexceptions");
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("xcc"));
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
@@ -10732,7 +11536,7 @@ void CrossWindows::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
TC.AddFilePathLibArgs(Args, CmdArgs);
- AddLinkerInputs(TC, Inputs, Args, CmdArgs);
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
if (D.CCCIsCXX() && !Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
@@ -10798,12 +11602,14 @@ void tools::SHAVE::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
// Append all -I, -iquote, -isystem paths, defines/undefines,
// 'f' flags, optimize flags, and warning options.
// These are spelled the same way in clang and moviCompile.
- Args.AddAllArgs(CmdArgs, {options::OPT_I_Group, options::OPT_clang_i_Group,
- options::OPT_std_EQ, options::OPT_D, options::OPT_U,
- options::OPT_f_Group, options::OPT_f_clang_Group,
- options::OPT_g_Group, options::OPT_M_Group,
- options::OPT_O_Group, options::OPT_W_Group,
- options::OPT_mcpu_EQ});
+ Args.AddAllArgsExcept(
+ CmdArgs,
+ {options::OPT_I_Group, options::OPT_clang_i_Group, options::OPT_std_EQ,
+ options::OPT_D, options::OPT_U, options::OPT_f_Group,
+ options::OPT_f_clang_Group, options::OPT_g_Group, options::OPT_M_Group,
+ options::OPT_O_Group, options::OPT_W_Group, options::OPT_mcpu_EQ},
+ {options::OPT_fno_split_dwarf_inlining});
+ Args.hasArg(options::OPT_fno_split_dwarf_inlining); // Claim it if present.
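+ // (hasArg() marks the matched argument as claimed, which suppresses the
+ // "argument unused during compilation" warning without forwarding the flag.)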
// If we're producing a dependency file, and assembly is the final action,
// then the name of the target in the dependency file should be the '.o'
@@ -10879,6 +11685,8 @@ void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
bool UseDefaultLibs =
!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs);
+ // Silence warning if the args contain both -nostdlib and -stdlib=.
+ Args.getLastArg(options::OPT_stdlib_EQ);
if (T.getArch() == llvm::Triple::sparc)
CmdArgs.push_back("-EB");
@@ -10913,22 +11721,31 @@ void tools::Myriad::Linker::ConstructJob(Compilation &C, const JobAction &JA,
TC.AddFilePathLibArgs(Args, CmdArgs);
- AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+ bool NeedsSanitizerDeps = addSanitizerRuntimes(TC, Args, CmdArgs);
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
if (UseDefaultLibs) {
- if (C.getDriver().CCCIsCXX())
- CmdArgs.push_back("-lstdc++");
+ if (NeedsSanitizerDeps)
+ linkSanitizerRuntimeDeps(TC, CmdArgs);
+ if (C.getDriver().CCCIsCXX()) {
+ if (TC.GetCXXStdlibType(Args) == ToolChain::CST_Libcxx) {
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ } else
+ CmdArgs.push_back("-lstdc++");
+ }
if (T.getOS() == llvm::Triple::RTEMS) {
CmdArgs.push_back("--start-group");
CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc"); // circularly dependent on rtems
// You must provide your own "-L" option to enable finding these.
CmdArgs.push_back("-lrtemscpu");
CmdArgs.push_back("-lrtemsbsp");
CmdArgs.push_back("--end-group");
} else {
CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc");
}
- CmdArgs.push_back("-lgcc");
}
if (UseStartfiles) {
CmdArgs.push_back(Args.MakeArgString(TC.GetFilePath("crtend.o")));
@@ -11022,7 +11839,7 @@ static void ConstructPS4LinkJob(const Tool &T, Compilation &C,
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (Args.hasArg(options::OPT_pthread)) {
CmdArgs.push_back("-lpthread");
@@ -11118,7 +11935,7 @@ static void ConstructGoldLinkJob(const Tool &T, Compilation &C,
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
// For PS4, we always want to pass libm, libstdc++ and libkernel
@@ -11254,7 +12071,7 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
// Check that our installation's ptxas supports gpu_arch.
if (!Args.hasArg(options::OPT_no_cuda_version_check)) {
- TC.cudaInstallation().CheckCudaVersionSupportsArch(gpu_arch);
+ TC.CudaInstallation.CheckCudaVersionSupportsArch(gpu_arch);
}
ArgStringList CmdArgs;
@@ -11307,7 +12124,11 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
for (const auto& A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
CmdArgs.push_back(Args.MakeArgString(A));
- const char *Exec = Args.MakeArgString(TC.GetProgramPath("ptxas"));
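+ // Prefer an explicitly specified ptxas (OPT_ptxas_path_EQ) over the
+ // toolchain's program search.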
+ const char *Exec;
+ if (Arg *A = Args.getLastArg(options::OPT_ptxas_path_EQ))
+ Exec = A->getValue();
+ else
+ Exec = Args.MakeArgString(TC.GetProgramPath("ptxas"));
C.addCommand(llvm::make_unique<Command>(JA, *this, Exec, CmdArgs, Inputs));
}
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index 02bdb8e5e2d2..98dcf841169e 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -17,6 +17,7 @@
#include "clang/Driver/Util.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Option/Option.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Compiler.h"
namespace clang {
@@ -101,6 +102,12 @@ private:
mutable std::unique_ptr<visualstudio::Compiler> CLFallback;
+ mutable std::unique_ptr<llvm::raw_fd_ostream> CompilationDatabase = nullptr;
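+ // Lazily opened output stream used by DumpCompilationDatabase() below to
+ // append compilation database entries.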
+ void DumpCompilationDatabase(Compilation &C, StringRef Filename,
+ StringRef Target,
+ const InputInfo &Output, const InputInfo &Input,
+ const llvm::opt::ArgList &Args) const;
+
public:
// CAUTION! The first constructor argument ("clang") is not arbitrary,
// as it is for other tools. Some operations on a Tool actually test
@@ -125,6 +132,8 @@ public:
: Tool("clang::as", "clang integrated assembler", TC, RF_Full) {}
void AddMIPSTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ void AddX86TargetArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
bool hasGoodDiagnostics() const override { return true; }
bool hasIntegratedAssembler() const override { return false; }
bool hasIntegratedCPP() const override { return false; }
@@ -135,6 +144,24 @@ public:
const char *LinkingOutput) const override;
};
+/// Offload bundler tool.
+class LLVM_LIBRARY_VISIBILITY OffloadBundler final : public Tool {
+public:
+ OffloadBundler(const ToolChain &TC)
+ : Tool("offload bundler", "clang-offload-bundler", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+ void ConstructJobMultipleOutputs(Compilation &C, const JobAction &JA,
+ const InputInfoList &Outputs,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
/// \brief Base class for all GNU tools that provide the same behavior when
/// it comes to response files support
class LLVM_LIBRARY_VISIBILITY GnuTool : public Tool {
@@ -594,6 +621,21 @@ public:
};
} // end namespace nacltools
+namespace fuchsia {
+class LLVM_LIBRARY_VISIBILITY Linker : public GnuTool {
+public:
+ Linker(const ToolChain &TC) : GnuTool("fuchsia::Linker", "ld.lld", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace fuchsia
+
/// minix -- Directly call GNU Binutils assembler and linker
namespace minix {
class LLVM_LIBRARY_VISIBILITY Assembler : public GnuTool {
@@ -683,10 +725,6 @@ public:
/// Visual studio tools.
namespace visualstudio {
-VersionTuple getMSVCVersion(const Driver *D, const ToolChain &TC,
- const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args, bool IsWindowsMSVC);
-
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
Linker(const ToolChain &TC)
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index f8e1e40dc6bf..ab63f0e81b12 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -44,13 +44,28 @@ types::ID types::getPreprocessedType(ID Id) {
return getInfo(Id).PreprocessedType;
}
+types::ID types::getPrecompiledType(ID Id) {
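+ // Types carrying the 'm' flag are C++ module interfaces and precompile to a
+ // module file; other precompile-only inputs (headers) produce a PCH.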
+ if (strchr(getInfo(Id).Flags, 'm'))
+ return TY_ModuleFile;
+ if (onlyPrecompileType(Id))
+ return TY_PCH;
+ return TY_INVALID;
+}
+
const char *types::getTypeTempSuffix(ID Id, bool CLMode) {
- if (Id == TY_Object && CLMode)
- return "obj";
- if (Id == TY_Image && CLMode)
- return "exe";
- if (Id == TY_PP_Asm && CLMode)
- return "asm";
+ if (CLMode) {
+ switch (Id) {
+ case TY_Object:
+ case TY_LTO_BC:
+ return "obj";
+ case TY_Image:
+ return "exe";
+ case TY_PP_Asm:
+ return "asm";
+ default:
+ break;
+ }
+ }
return getInfo(Id).TempSuffix;
}
@@ -95,6 +110,7 @@ bool types::isAcceptedByClang(ID Id) {
case TY_ObjCHeader: case TY_PP_ObjCHeader:
case TY_CXXHeader: case TY_PP_CXXHeader:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ case TY_CXXModule: case TY_PP_CXXModule:
case TY_AST: case TY_ModuleFile:
case TY_LLVM_IR: case TY_LLVM_BC:
return true;
@@ -123,6 +139,7 @@ bool types::isCXX(ID Id) {
case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias:
case TY_CXXHeader: case TY_PP_CXXHeader:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ case TY_CXXModule: case TY_PP_CXXModule:
case TY_CUDA: case TY_PP_CUDA: case TY_CUDA_DEVICE:
return true;
}
@@ -153,58 +170,67 @@ bool types::isCuda(ID Id) {
}
}
-types::ID types::lookupTypeForExtension(const char *Ext) {
+bool types::isSrcFile(ID Id) {
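+ // A source file is any input clang would still preprocess, excluding
+ // object files.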
+ return Id != TY_Object && getPreprocessedType(Id) != TY_INVALID;
+}
+
+types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
return llvm::StringSwitch<types::ID>(Ext)
.Case("c", TY_C)
+ .Case("C", TY_CXX)
+ .Case("F", TY_Fortran)
+ .Case("f", TY_PP_Fortran)
+ .Case("h", TY_CHeader)
+ .Case("H", TY_CXXHeader)
.Case("i", TY_PP_C)
.Case("m", TY_ObjC)
.Case("M", TY_ObjCXX)
- .Case("h", TY_CHeader)
- .Case("C", TY_CXX)
- .Case("H", TY_CXXHeader)
- .Case("f", TY_PP_Fortran)
- .Case("F", TY_Fortran)
- .Case("s", TY_PP_Asm)
- .Case("asm", TY_PP_Asm)
- .Case("S", TY_Asm)
.Case("o", TY_Object)
- .Case("obj", TY_Object)
- .Case("lib", TY_Object)
- .Case("ii", TY_PP_CXX)
- .Case("mi", TY_PP_ObjC)
- .Case("mm", TY_ObjCXX)
+ .Case("S", TY_Asm)
+ .Case("s", TY_PP_Asm)
.Case("bc", TY_LLVM_BC)
.Case("cc", TY_CXX)
.Case("CC", TY_CXX)
.Case("cl", TY_CL)
.Case("cp", TY_CXX)
.Case("cu", TY_CUDA)
- .Case("cui", TY_PP_CUDA)
.Case("hh", TY_CXXHeader)
+ .Case("ii", TY_PP_CXX)
.Case("ll", TY_LLVM_IR)
- .Case("hpp", TY_CXXHeader)
- .Case("ads", TY_Ada)
+ .Case("mi", TY_PP_ObjC)
+ .Case("mm", TY_ObjCXX)
+ .Case("rs", TY_RenderScript)
.Case("adb", TY_Ada)
+ .Case("ads", TY_Ada)
+ .Case("asm", TY_PP_Asm)
.Case("ast", TY_AST)
+ .Case("ccm", TY_CXXModule)
+ .Case("cpp", TY_CXX)
+ .Case("CPP", TY_CXX)
.Case("c++", TY_CXX)
.Case("C++", TY_CXX)
+ .Case("cui", TY_PP_CUDA)
.Case("cxx", TY_CXX)
- .Case("cpp", TY_CXX)
- .Case("CPP", TY_CXX)
.Case("CXX", TY_CXX)
+ .Case("F90", TY_Fortran)
+ .Case("f90", TY_PP_Fortran)
+ .Case("F95", TY_Fortran)
+ .Case("f95", TY_PP_Fortran)
.Case("for", TY_PP_Fortran)
.Case("FOR", TY_PP_Fortran)
.Case("fpp", TY_Fortran)
.Case("FPP", TY_Fortran)
- .Case("f90", TY_PP_Fortran)
- .Case("f95", TY_PP_Fortran)
- .Case("F90", TY_Fortran)
- .Case("F95", TY_Fortran)
+ .Case("gch", TY_PCH)
+ .Case("hpp", TY_CXXHeader)
+ .Case("iim", TY_PP_CXXModule)
+ .Case("lib", TY_Object)
.Case("mii", TY_PP_ObjCXX)
- .Case("pcm", TY_ModuleFile)
+ .Case("obj", TY_Object)
.Case("pch", TY_PCH)
- .Case("gch", TY_PCH)
- .Case("rs", TY_RenderScript)
+ .Case("pcm", TY_ModuleFile)
+ .Case("c++m", TY_CXXModule)
+ .Case("cppm", TY_CXXModule)
+ .Case("cxxm", TY_CXXModule)
.Default(TY_INVALID);
}
@@ -226,9 +252,11 @@ void types::getCompilationPhases(ID Id, llvm::SmallVectorImpl<phases::ID> &P) {
P.push_back(phases::Preprocess);
}
- if (onlyPrecompileType(Id)) {
+ if (getPrecompiledType(Id) != TY_INVALID) {
P.push_back(phases::Precompile);
- } else {
+ }
+
+ if (!onlyPrecompileType(Id)) {
if (!onlyAssembleType(Id)) {
P.push_back(phases::Compile);
P.push_back(phases::Backend);
@@ -237,7 +265,7 @@ void types::getCompilationPhases(ID Id, llvm::SmallVectorImpl<phases::ID> &P) {
}
}
- if (!onlyPrecompileType(Id) && Id != TY_CUDA_DEVICE) {
+ if (!onlyPrecompileType(Id)) {
P.push_back(phases::Link);
}
assert(0 < P.size() && "Not enough phases in list");
@@ -259,3 +287,21 @@ ID types::lookupCXXTypeForCType(ID Id) {
return types::TY_PP_CXXHeader;
}
}
+
+ID types::lookupHeaderTypeForSourceType(ID Id) {
+ switch (Id) {
+ default:
+ return Id;
+
+ case types::TY_C:
+ return types::TY_CHeader;
+ case types::TY_CXX:
+ return types::TY_CXXHeader;
+ case types::TY_ObjC:
+ return types::TY_ObjCHeader;
+ case types::TY_ObjCXX:
+ return types::TY_ObjCXXHeader;
+ case types::TY_CL:
+ return types::TY_CLHeader;
+ }
+}
diff --git a/lib/Edit/RewriteObjCFoundationAPI.cpp b/lib/Edit/RewriteObjCFoundationAPI.cpp
index 482c0f6f8568..2148316532de 100644
--- a/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1076,6 +1076,8 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
+ case CK_IntToOCLSampler:
return false;
case CK_BooleanToSignedIntegral:
diff --git a/lib/Format/BreakableToken.cpp b/lib/Format/BreakableToken.cpp
index 36a8c4d8da6d..6363f895f95b 100644
--- a/lib/Format/BreakableToken.cpp
+++ b/lib/Format/BreakableToken.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "BreakableToken.h"
+#include "Comments.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/STLExtras.h"
@@ -182,21 +183,6 @@ void BreakableStringLiteral::insertBreak(unsigned LineIndex,
Prefix, InPPDirective, 1, IndentLevel, LeadingSpaces);
}
-static StringRef getLineCommentIndentPrefix(StringRef Comment) {
- static const char *const KnownPrefixes[] = {"///", "//", "//!"};
- StringRef LongestPrefix;
- for (StringRef KnownPrefix : KnownPrefixes) {
- if (Comment.startswith(KnownPrefix)) {
- size_t PrefixLength = KnownPrefix.size();
- while (PrefixLength < Comment.size() && Comment[PrefixLength] == ' ')
- ++PrefixLength;
- if (PrefixLength > LongestPrefix.size())
- LongestPrefix = Comment.substr(0, PrefixLength);
- }
- }
- return LongestPrefix;
-}
-
BreakableLineComment::BreakableLineComment(
const FormatToken &Token, unsigned IndentLevel, unsigned StartColumn,
bool InPPDirective, encoding::Encoding Encoding, const FormatStyle &Style)
diff --git a/lib/Format/CMakeLists.txt b/lib/Format/CMakeLists.txt
index cb46b9f255d2..c977c2d3c5fa 100644
--- a/lib/Format/CMakeLists.txt
+++ b/lib/Format/CMakeLists.txt
@@ -3,6 +3,7 @@ set(LLVM_LINK_COMPONENTS support)
add_clang_library(clangFormat
AffectedRangeManager.cpp
BreakableToken.cpp
+ Comments.cpp
ContinuationIndenter.cpp
Format.cpp
FormatToken.cpp
diff --git a/lib/Format/Comments.cpp b/lib/Format/Comments.cpp
new file mode 100644
index 000000000000..1b27f5b30a60
--- /dev/null
+++ b/lib/Format/Comments.cpp
@@ -0,0 +1,36 @@
+//===--- Comments.cpp - Comment Manipulation -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Implements comment manipulation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "Comments.h"
+
+namespace clang {
+namespace format {
+
+StringRef getLineCommentIndentPrefix(StringRef Comment) {
+ static const char *const KnownPrefixes[] = {"///", "//", "//!"};
+ StringRef LongestPrefix;
+ for (StringRef KnownPrefix : KnownPrefixes) {
+ if (Comment.startswith(KnownPrefix)) {
+ size_t PrefixLength = KnownPrefix.size();
+ while (PrefixLength < Comment.size() && Comment[PrefixLength] == ' ')
+ ++PrefixLength;
+ if (PrefixLength > LongestPrefix.size())
+ LongestPrefix = Comment.substr(0, PrefixLength);
+ }
+ }
+ return LongestPrefix;
+}
+
+} // namespace format
+} // namespace clang
diff --git a/lib/Format/Comments.h b/lib/Format/Comments.h
new file mode 100644
index 000000000000..59f0596361a5
--- /dev/null
+++ b/lib/Format/Comments.h
@@ -0,0 +1,33 @@
+//===--- Comments.h - Comment Manipulation ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// \brief Declares comment manipulation functionality.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_FORMAT_COMMENTS_H
+#define LLVM_CLANG_LIB_FORMAT_COMMENTS_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace format {
+
+/// \brief Returns the comment prefix of the line comment \p Comment.
+///
+/// The comment prefix consists of a leading known prefix, like "//" or "///",
+/// together with the following whitespace.
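+/// For example (illustrative), "//  indented comment" yields "//  ".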
+StringRef getLineCommentIndentPrefix(StringRef Comment);
+
+} // namespace format
+} // namespace clang
+
+#endif
diff --git a/lib/Format/ContinuationIndenter.cpp b/lib/Format/ContinuationIndenter.cpp
index 322969e4bb71..bf075ab6d53e 100644
--- a/lib/Format/ContinuationIndenter.cpp
+++ b/lib/Format/ContinuationIndenter.cpp
@@ -19,7 +19,6 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
#include "llvm/Support/Debug.h"
-#include <string>
#define DEBUG_TYPE "format-formatter"
@@ -178,6 +177,9 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
((Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All) ||
Style.BreakConstructorInitializersBeforeComma || Style.ColumnLimit != 0))
return true;
+ if (Current.is(TT_ObjCMethodExpr) && !Previous.is(TT_SelectorName) &&
+ State.Line->startsWith(TT_ObjCMethodSpecifier))
+ return true;
if (Current.is(TT_SelectorName) && State.Stack.back().ObjCSelectorNameFound &&
State.Stack.back().BreakBeforeParameter)
return true;
@@ -458,7 +460,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
Penalty += State.NextToken->SplitPenalty;
// Breaking before the first "<<" is generally not desirable if the LHS is
- // short. Also always add the penalty if the LHS is split over mutliple lines
+ // short. Also always add the penalty if the LHS is split over multiple lines
// to avoid unnecessary line breaks that just work around this penalty.
if (NextNonComment->is(tok::lessless) &&
State.Stack.back().FirstLessLess == 0 &&
@@ -521,7 +523,8 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
Style.ContinuationIndentWidth;
}
- if ((Previous.isOneOf(tok::comma, tok::semi) &&
+ if ((PreviousNonComment &&
+ PreviousNonComment->isOneOf(tok::comma, tok::semi) &&
!State.Stack.back().AvoidBinPacking) ||
Previous.is(TT_BinaryOperator))
State.Stack.back().BreakBeforeParameter = false;
@@ -557,6 +560,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// and we need to avoid bin packing there.
bool NestedBlockSpecialCase =
Style.Language != FormatStyle::LK_Cpp &&
+ Style.Language != FormatStyle::LK_ObjC &&
Current.is(tok::r_brace) && State.Stack.size() > 1 &&
State.Stack[State.Stack.size() - 2].NestedBlockInlined;
if (!NestedBlockSpecialCase)
@@ -672,6 +676,8 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return State.Stack.back().ColonPos - NextNonComment->ColumnWidth;
return State.Stack.back().Indent;
}
+ if (NextNonComment->is(tok::colon) && NextNonComment->is(TT_ObjCMethodExpr))
+ return State.Stack.back().ColonPos;
if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
if (State.Stack.back().StartOfArraySubscripts != 0)
return State.Stack.back().StartOfArraySubscripts;
@@ -861,7 +867,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
// Exclude relational operators, as there, it is always more desirable to
// have the LHS 'left' of the RHS.
if (Previous && Previous->getPrecedence() != prec::Assignment &&
- Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
+ Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr, tok::comma) &&
Previous->getPrecedence() != prec::Relational) {
bool BreakBeforeOperator =
Previous->is(tok::lessless) ||
diff --git a/lib/Format/Encoding.h b/lib/Format/Encoding.h
index 148f7fd0e91b..3339597b4edd 100644
--- a/lib/Format/Encoding.h
+++ b/lib/Format/Encoding.h
@@ -33,32 +33,13 @@ enum Encoding {
/// \brief Detects encoding of the Text. If the Text can be decoded using UTF-8,
/// it is considered UTF8, otherwise we treat it as some 8-bit encoding.
inline Encoding detectEncoding(StringRef Text) {
- const UTF8 *Ptr = reinterpret_cast<const UTF8 *>(Text.begin());
- const UTF8 *BufEnd = reinterpret_cast<const UTF8 *>(Text.end());
- if (::isLegalUTF8String(&Ptr, BufEnd))
+ const llvm::UTF8 *Ptr = reinterpret_cast<const llvm::UTF8 *>(Text.begin());
+ const llvm::UTF8 *BufEnd = reinterpret_cast<const llvm::UTF8 *>(Text.end());
+ if (llvm::isLegalUTF8String(&Ptr, BufEnd))
return Encoding_UTF8;
return Encoding_Unknown;
}
-inline unsigned getCodePointCountUTF8(StringRef Text) {
- unsigned CodePoints = 0;
- for (size_t i = 0, e = Text.size(); i < e; i += getNumBytesForUTF8(Text[i])) {
- ++CodePoints;
- }
- return CodePoints;
-}
-
-/// \brief Gets the number of code points in the Text using the specified
-/// Encoding.
-inline unsigned getCodePointCount(StringRef Text, Encoding Encoding) {
- switch (Encoding) {
- case Encoding_UTF8:
- return getCodePointCountUTF8(Text);
- default:
- return Text.size();
- }
-}
-
/// \brief Returns the number of columns required to display the \p Text on a
/// generic Unicode-capable terminal. Text is assumed to use the specified
/// \p Encoding.
@@ -97,7 +78,7 @@ inline unsigned columnWidthWithTabs(StringRef Text, unsigned StartColumn,
inline unsigned getCodePointNumBytes(char FirstChar, Encoding Encoding) {
switch (Encoding) {
case Encoding_UTF8:
- return getNumBytesForUTF8(FirstChar);
+ return llvm::getNumBytesForUTF8(FirstChar);
default:
return 1;
}
@@ -136,7 +117,7 @@ inline unsigned getEscapeSequenceLength(StringRef Text) {
++I;
return I;
}
- return 1 + getNumBytesForUTF8(Text[1]);
+ return 1 + llvm::getNumBytesForUTF8(Text[1]);
}
}
diff --git a/lib/Format/Format.cpp b/lib/Format/Format.cpp
index 32d6bb855ad6..70b90d6fa14e 100644
--- a/lib/Format/Format.cpp
+++ b/lib/Format/Format.cpp
@@ -36,7 +36,6 @@
#include "llvm/Support/YAMLTraits.h"
#include <algorithm>
#include <memory>
-#include <queue>
#include <string>
#define DEBUG_TYPE "format-formatter"
@@ -53,6 +52,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
IO.enumCase(Value, "Cpp", FormatStyle::LK_Cpp);
IO.enumCase(Value, "Java", FormatStyle::LK_Java);
IO.enumCase(Value, "JavaScript", FormatStyle::LK_JavaScript);
+ IO.enumCase(Value, "ObjC", FormatStyle::LK_ObjC);
IO.enumCase(Value, "Proto", FormatStyle::LK_Proto);
IO.enumCase(Value, "TableGen", FormatStyle::LK_TableGen);
}
@@ -339,6 +339,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("ReflowComments", Style.ReflowComments);
IO.mapOptional("SortIncludes", Style.SortIncludes);
IO.mapOptional("SpaceAfterCStyleCast", Style.SpaceAfterCStyleCast);
+ IO.mapOptional("SpaceAfterTemplateKeyword", Style.SpaceAfterTemplateKeyword);
IO.mapOptional("SpaceBeforeAssignmentOperators",
Style.SpaceBeforeAssignmentOperators);
IO.mapOptional("SpaceBeforeParens", Style.SpaceBeforeParens);
@@ -420,7 +421,7 @@ std::error_code make_error_code(ParseError e) {
return std::error_code(static_cast<int>(e), getParseCategory());
}
-const char *ParseErrorCategory::name() const LLVM_NOEXCEPT {
+const char *ParseErrorCategory::name() const noexcept {
return "clang-format.parse_error";
}
@@ -553,6 +554,7 @@ FormatStyle getLLVMStyle() {
LLVMStyle.SpacesInContainerLiterals = true;
LLVMStyle.SpacesInCStyleCastParentheses = false;
LLVMStyle.SpaceAfterCStyleCast = false;
+ LLVMStyle.SpaceAfterTemplateKeyword = true;
LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
LLVMStyle.SpaceBeforeAssignmentOperators = true;
LLVMStyle.SpacesInAngles = false;
@@ -609,10 +611,11 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
} else if (Language == FormatStyle::LK_JavaScript) {
GoogleStyle.AlignAfterOpenBracket = FormatStyle::BAS_AlwaysBreak;
GoogleStyle.AlignOperands = false;
- GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
+ GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Empty;
GoogleStyle.AlwaysBreakBeforeMultilineStrings = false;
GoogleStyle.BreakBeforeTernaryOperators = false;
- GoogleStyle.CommentPragmas = "@(export|requirecss|return|see|visibility) ";
+ GoogleStyle.CommentPragmas =
+ "(taze:|@(export|requirecss|return|returns|see|visibility)) ";
GoogleStyle.MaxEmptyLinesToKeep = 3;
GoogleStyle.NamespaceIndentation = FormatStyle::NI_All;
GoogleStyle.SpacesInContainerLiterals = false;
@@ -621,6 +624,8 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
} else if (Language == FormatStyle::LK_Proto) {
GoogleStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
GoogleStyle.SpacesInContainerLiterals = false;
+ } else if (Language == FormatStyle::LK_ObjC) {
+ GoogleStyle.ColumnLimit = 100;
}
return GoogleStyle;
@@ -650,10 +655,12 @@ FormatStyle getMozillaStyle() {
MozillaStyle.AllowAllParametersOfDeclarationOnNextLine = false;
MozillaStyle.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_Inline;
MozillaStyle.AlwaysBreakAfterReturnType =
- FormatStyle::RTBS_TopLevelDefinitions;
+ FormatStyle::RTBS_TopLevel;
MozillaStyle.AlwaysBreakAfterDefinitionReturnType =
FormatStyle::DRTBS_TopLevel;
MozillaStyle.AlwaysBreakTemplateDeclarations = true;
+ MozillaStyle.BinPackParameters = false;
+ MozillaStyle.BinPackArguments = false;
MozillaStyle.BreakBeforeBraces = FormatStyle::BS_Mozilla;
MozillaStyle.BreakConstructorInitializersBeforeComma = true;
MozillaStyle.ConstructorInitializerIndentWidth = 2;
@@ -664,6 +671,7 @@ FormatStyle getMozillaStyle() {
MozillaStyle.ObjCSpaceBeforeProtocolList = false;
MozillaStyle.PenaltyReturnTypeOnItsOwnLine = 200;
MozillaStyle.PointerAlignment = FormatStyle::PAS_Left;
+ MozillaStyle.SpaceAfterTemplateKeyword = false;
return MozillaStyle;
}
@@ -683,7 +691,6 @@ FormatStyle getWebKitStyle() {
Style.ObjCBlockIndentWidth = 4;
Style.ObjCSpaceAfterProperty = true;
Style.PointerAlignment = FormatStyle::PAS_Left;
- Style.Standard = FormatStyle::LS_Cpp03;
return Style;
}
@@ -791,46 +798,25 @@ std::string configurationAsText(const FormatStyle &Style) {
namespace {
-class Formatter : public TokenAnalyzer {
+class JavaScriptRequoter : public TokenAnalyzer {
public:
- Formatter(const Environment &Env, const FormatStyle &Style,
- bool *IncompleteFormat)
- : TokenAnalyzer(Env, Style), IncompleteFormat(IncompleteFormat) {}
+ JavaScriptRequoter(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {}
tooling::Replacements
analyze(TokenAnnotator &Annotator,
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- FormatTokenLexer &Tokens, tooling::Replacements &Result) override {
- deriveLocalStyle(AnnotatedLines);
+ FormatTokenLexer &Tokens) override {
AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
AnnotatedLines.end());
-
- if (Style.Language == FormatStyle::LK_JavaScript &&
- Style.JavaScriptQuotes != FormatStyle::JSQS_Leave)
- requoteJSStringLiteral(AnnotatedLines, Result);
-
- for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
- Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
- }
-
- Annotator.setCommentLineLevels(AnnotatedLines);
-
- WhitespaceManager Whitespaces(
- Env.getSourceManager(), Style,
- inputUsesCRLF(Env.getSourceManager().getBufferData(Env.getFileID())));
- ContinuationIndenter Indenter(Style, Tokens.getKeywords(),
- Env.getSourceManager(), Whitespaces, Encoding,
- BinPackInconclusiveFunctions);
- UnwrappedLineFormatter(&Indenter, &Whitespaces, Style, Tokens.getKeywords(),
- IncompleteFormat)
- .format(AnnotatedLines);
- return Whitespaces.generateReplacements();
+ tooling::Replacements Result;
+ requoteJSStringLiteral(AnnotatedLines, Result);
+ return Result;
}
private:
- // If the last token is a double/single-quoted string literal, generates a
- // replacement with a single/double quoted string literal, re-escaping the
- // contents in the process.
+ // Replaces double/single-quoted string literals as appropriate, re-escaping
+ // the contents in the process.
void requoteJSStringLiteral(SmallVectorImpl<AnnotatedLine *> &Lines,
tooling::Replacements &Result) {
for (AnnotatedLine *Line : Lines) {
@@ -842,8 +828,7 @@ private:
StringRef Input = FormatTok->TokenText;
if (FormatTok->Finalized || !FormatTok->isStringLiteral() ||
// NB: testing for not starting with a double quote to avoid
- // breaking
- // `template strings`.
+ // breaking `template strings`.
(Style.JavaScriptQuotes == FormatStyle::JSQS_Single &&
!Input.startswith("\"")) ||
(Style.JavaScriptQuotes == FormatStyle::JSQS_Double &&
@@ -855,15 +840,20 @@ private:
SourceLocation Start = FormatTok->Tok.getLocation();
auto Replace = [&](SourceLocation Start, unsigned Length,
StringRef ReplacementText) {
- Result.insert(tooling::Replacement(Env.getSourceManager(), Start,
- Length, ReplacementText));
+ auto Err = Result.add(tooling::Replacement(
+ Env.getSourceManager(), Start, Length, ReplacementText));
+ // FIXME: handle the error. For now, print the error message and skip the
+ // replacement in release builds.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
};
Replace(Start, 1, IsSingle ? "'" : "\"");
Replace(FormatTok->Tok.getEndLoc().getLocWithOffset(-1), 1,
IsSingle ? "'" : "\"");
// Escape internal quotes.
- size_t ColumnWidth = FormatTok->TokenText.size();
bool Escaped = false;
for (size_t i = 1; i < Input.size() - 1; i++) {
switch (Input[i]) {
@@ -873,7 +863,6 @@ private:
(!IsSingle && Input[i + 1] == '\''))) {
// Remove this \, it's escaping a " or ' that no longer needs
// escaping
- ColumnWidth--;
Replace(Start.getLocWithOffset(i), 1, "");
continue;
}
@@ -884,7 +873,6 @@ private:
if (!Escaped && IsSingle == (Input[i] == '\'')) {
// Escape the quote.
Replace(Start.getLocWithOffset(i), 0, "\\");
- ColumnWidth++;
}
Escaped = false;
break;
@@ -893,16 +881,46 @@ private:
break;
}
}
-
- // For formatting, count the number of non-escaped single quotes in them
- // and adjust ColumnWidth to take the added escapes into account.
- // FIXME(martinprobst): this might conflict with code breaking a long
- // string literal (which clang-format doesn't do, yet). For that to
- // work, this code would have to modify TokenText directly.
- FormatTok->ColumnWidth = ColumnWidth;
}
}
}
+};
+
+class Formatter : public TokenAnalyzer {
+public:
+ Formatter(const Environment &Env, const FormatStyle &Style,
+ bool *IncompleteFormat)
+ : TokenAnalyzer(Env, Style), IncompleteFormat(IncompleteFormat) {}
+
+ tooling::Replacements
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ tooling::Replacements Result;
+ deriveLocalStyle(AnnotatedLines);
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
+ AnnotatedLines.end());
+ for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
+ Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
+ }
+ Annotator.setCommentLineLevels(AnnotatedLines);
+
+ WhitespaceManager Whitespaces(
+ Env.getSourceManager(), Style,
+ inputUsesCRLF(Env.getSourceManager().getBufferData(Env.getFileID())));
+ ContinuationIndenter Indenter(Style, Tokens.getKeywords(),
+ Env.getSourceManager(), Whitespaces, Encoding,
+ BinPackInconclusiveFunctions);
+ UnwrappedLineFormatter(&Indenter, &Whitespaces, Style, Tokens.getKeywords(),
+ IncompleteFormat)
+ .format(AnnotatedLines);
+ for (const auto &R : Whitespaces.generateReplacements())
+ if (Result.add(R))
+ return Result;
+ return Result;
+ }
+
+private:
static bool inputUsesCRLF(StringRef Text) {
return Text.count('\r') * 2 > Text.count('\n');
@@ -991,7 +1009,7 @@ public:
tooling::Replacements
analyze(TokenAnnotator &Annotator,
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- FormatTokenLexer &Tokens, tooling::Replacements &Result) override {
+ FormatTokenLexer &Tokens) override {
// FIXME: in the current implementation the granularity of affected range
// is an annotated line. However, this is not sufficient. Furthermore,
// redundant code introduced by replacements does not necessarily
@@ -1008,8 +1026,11 @@ public:
if (Line->Affected) {
cleanupRight(Line->First, tok::comma, tok::comma);
cleanupRight(Line->First, TT_CtorInitializerColon, tok::comma);
+ cleanupRight(Line->First, tok::l_paren, tok::comma);
+ cleanupLeft(Line->First, tok::comma, tok::r_paren);
cleanupLeft(Line->First, TT_CtorInitializerComma, tok::l_brace);
cleanupLeft(Line->First, TT_CtorInitializerColon, tok::l_brace);
+ cleanupLeft(Line->First, TT_CtorInitializerColon, tok::equal);
}
}
@@ -1027,11 +1048,12 @@ private:
// Iterate through all lines and remove any empty (nested) namespaces.
void checkEmptyNamespace(SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
+ std::set<unsigned> DeletedLines;
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
auto &Line = *AnnotatedLines[i];
if (Line.startsWith(tok::kw_namespace) ||
Line.startsWith(tok::kw_inline, tok::kw_namespace)) {
- checkEmptyNamespace(AnnotatedLines, i, i);
+ checkEmptyNamespace(AnnotatedLines, i, i, DeletedLines);
}
}
@@ -1049,7 +1071,8 @@ private:
// sets \p NewLine to the last line checked.
// Returns true if the current namespace is empty.
bool checkEmptyNamespace(SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- unsigned CurrentLine, unsigned &NewLine) {
+ unsigned CurrentLine, unsigned &NewLine,
+ std::set<unsigned> &DeletedLines) {
unsigned InitLine = CurrentLine, End = AnnotatedLines.size();
if (Style.BraceWrapping.AfterNamespace) {
// If the left brace is in a new line, we should consume it first so that
@@ -1069,7 +1092,8 @@ private:
if (AnnotatedLines[CurrentLine]->startsWith(tok::kw_namespace) ||
AnnotatedLines[CurrentLine]->startsWith(tok::kw_inline,
tok::kw_namespace)) {
- if (!checkEmptyNamespace(AnnotatedLines, CurrentLine, NewLine))
+ if (!checkEmptyNamespace(AnnotatedLines, CurrentLine, NewLine,
+ DeletedLines))
return false;
CurrentLine = NewLine;
continue;
@@ -1121,6 +1145,8 @@ private:
break;
if (Left->is(LK) && Right->is(RK)) {
deleteToken(DeleteLeft ? Left : Right);
+ for (auto *Tok = Left->Next; Tok && Tok != Right; Tok = Tok->Next)
+ deleteToken(Tok);
// If the right token is deleted, we should keep the left token
// unchanged and pair it with the new right token.
if (!DeleteLeft)
@@ -1164,7 +1190,14 @@ private:
}
auto SR = CharSourceRange::getCharRange(Tokens[St]->Tok.getLocation(),
Tokens[End]->Tok.getEndLoc());
- Fixes.insert(tooling::Replacement(Env.getSourceManager(), SR, ""));
+ auto Err =
+ Fixes.add(tooling::Replacement(Env.getSourceManager(), SR, ""));
+ // FIXME: better error handling. For now, just print the error message and
+ // skip the fix in release builds.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false && "Fixes must not conflict!");
+ }
Idx = End + 1;
}
@@ -1186,8 +1219,6 @@ private:
// Tokens to be deleted.
std::set<FormatToken *, FormatTokenLess> DeletedTokens;
- // The line numbers of lines to be deleted.
- std::set<unsigned> DeletedLines;
};
struct IncludeDirective {
@@ -1210,15 +1241,50 @@ static bool affectsRange(ArrayRef<tooling::Range> Ranges, unsigned Start,
return false;
}
-// Sorts a block of includes given by 'Includes' alphabetically adding the
-// necessary replacement to 'Replaces'. 'Includes' must be in strict source
-// order.
+// Returns a pair (Index, OffsetToEOL) describing the position of the cursor
+// before sorting/deduplicating. Index is the index of the include under the
+// cursor in the original set of includes. If this include has duplicates, it is
+// the index of the first of the duplicates as the others are going to be
+// removed. OffsetToEOL describes the cursor's position relative to the end of
+// its current line.
+// If `Cursor` is not on any #include, `Index` will be UINT_MAX.
+static std::pair<unsigned, unsigned>
+FindCursorIndex(const SmallVectorImpl<IncludeDirective> &Includes,
+ const SmallVectorImpl<unsigned> &Indices, unsigned Cursor) {
+ unsigned CursorIndex = UINT_MAX;
+ unsigned OffsetToEOL = 0;
+ for (int i = 0, e = Includes.size(); i != e; ++i) {
+ unsigned Start = Includes[Indices[i]].Offset;
+ unsigned End = Start + Includes[Indices[i]].Text.size();
+ if (!(Cursor >= Start && Cursor < End))
+ continue;
+ CursorIndex = Indices[i];
+ OffsetToEOL = End - Cursor;
+ // Put the cursor on the only remaining #include among the duplicate
+ // #includes.
+ while (--i >= 0 && Includes[CursorIndex].Text == Includes[Indices[i]].Text)
+ CursorIndex = i;
+ break;
+ }
+ return std::make_pair(CursorIndex, OffsetToEOL);
+}
+
+// Sorts and deduplicates a block of includes given by 'Includes'
+// alphabetically, adding the necessary replacement to 'Replaces'. 'Includes'
+// must be in strict source order.
+// #include directives with identical text are deduplicated; only the first
+// occurrence is kept. If `Cursor` is provided and sits on a deleted #include,
+// it is moved to the occurrence that remains.
static void sortCppIncludes(const FormatStyle &Style,
- const SmallVectorImpl<IncludeDirective> &Includes,
- ArrayRef<tooling::Range> Ranges, StringRef FileName,
- tooling::Replacements &Replaces, unsigned *Cursor) {
- if (!affectsRange(Ranges, Includes.front().Offset,
- Includes.back().Offset + Includes.back().Text.size()))
+ const SmallVectorImpl<IncludeDirective> &Includes,
+ ArrayRef<tooling::Range> Ranges, StringRef FileName,
+ tooling::Replacements &Replaces, unsigned *Cursor) {
+ unsigned IncludesBeginOffset = Includes.front().Offset;
+ unsigned IncludesEndOffset =
+ Includes.back().Offset + Includes.back().Text.size();
+ unsigned IncludesBlockSize = IncludesEndOffset - IncludesBeginOffset;
+ if (!affectsRange(Ranges, IncludesBeginOffset, IncludesEndOffset))
return;
SmallVector<unsigned, 16> Indices;
for (unsigned i = 0, e = Includes.size(); i != e; ++i)
@@ -1228,37 +1294,45 @@ static void sortCppIncludes(const FormatStyle &Style,
return std::tie(Includes[LHSI].Category, Includes[LHSI].Filename) <
std::tie(Includes[RHSI].Category, Includes[RHSI].Filename);
});
+ // The index of the include on which the cursor will be put after
+ // sorting/deduplicating.
+ unsigned CursorIndex;
+ // The offset from cursor to the end of line.
+ unsigned CursorToEOLOffset;
+ if (Cursor)
+ std::tie(CursorIndex, CursorToEOLOffset) =
+ FindCursorIndex(Includes, Indices, *Cursor);
+
+ // Deduplicate #includes.
+ Indices.erase(std::unique(Indices.begin(), Indices.end(),
+ [&](unsigned LHSI, unsigned RHSI) {
+ return Includes[LHSI].Text == Includes[RHSI].Text;
+ }),
+ Indices.end());
// If the #includes are out of order, we generate a single replacement fixing
// the entire block. Otherwise, no replacement is generated.
- if (std::is_sorted(Indices.begin(), Indices.end()))
+ if (Indices.size() == Includes.size() &&
+ std::is_sorted(Indices.begin(), Indices.end()))
return;
std::string result;
- bool CursorMoved = false;
for (unsigned Index : Indices) {
if (!result.empty())
result += "\n";
result += Includes[Index].Text;
-
- if (Cursor && !CursorMoved) {
- unsigned Start = Includes[Index].Offset;
- unsigned End = Start + Includes[Index].Text.size();
- if (*Cursor >= Start && *Cursor < End) {
- *Cursor = Includes.front().Offset + result.size() + *Cursor - End;
- CursorMoved = true;
- }
- }
+ if (Cursor && CursorIndex == Index)
+ *Cursor = IncludesBeginOffset + result.size() - CursorToEOLOffset;
}
- // Sorting #includes shouldn't change their total number of characters.
- // This would otherwise mess up 'Ranges'.
- assert(result.size() ==
- Includes.back().Offset + Includes.back().Text.size() -
- Includes.front().Offset);
-
- Replaces.insert(tooling::Replacement(FileName, Includes.front().Offset,
- result.size(), result));
+ auto Err = Replaces.add(tooling::Replacement(
+ FileName, Includes.front().Offset, IncludesBlockSize, result));
+ // FIXME: better error handling. For now, just skip the replacement in the
+ // release version.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
}
namespace {
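The deduplication added above is the classic sort-then-unique idiom, applied to an index array so that the original file offsets stay available for the cursor fix-up. A minimal standalone sketch of the same idiom, using plain STL containers and hypothetical names rather than the patch's LLVM types:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> Includes = {"#include \"b.h\"",
                                           "#include \"a.h\"",
                                           "#include \"b.h\""};
      // Sort indices instead of the strings so the original positions survive.
      std::vector<unsigned> Indices = {0, 1, 2};
      std::sort(Indices.begin(), Indices.end(), [&](unsigned L, unsigned R) {
        return Includes[L] < Includes[R];
      });
      // After sorting, duplicates are adjacent; keep only the first of each run.
      Indices.erase(std::unique(Indices.begin(), Indices.end(),
                                [&](unsigned L, unsigned R) {
                                  return Includes[L] == Includes[R];
                                }),
                    Indices.end());
      for (unsigned I : Indices)
        std::cout << Includes[I] << "\n"; // a.h, then a single b.h
    }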
@@ -1403,14 +1477,13 @@ processReplacements(T ProcessFunc, StringRef Code,
auto NewCode = applyAllReplacements(Code, Replaces);
if (!NewCode)
return NewCode.takeError();
- std::vector<tooling::Range> ChangedRanges =
- tooling::calculateChangedRanges(Replaces);
+ std::vector<tooling::Range> ChangedRanges = Replaces.getAffectedRanges();
StringRef FileName = Replaces.begin()->getFilePath();
tooling::Replacements FormatReplaces =
ProcessFunc(Style, *NewCode, ChangedRanges, FileName);
- return mergeReplacements(Replaces, FormatReplaces);
+ return Replaces.merge(FormatReplaces);
}
llvm::Expected<tooling::Replacements>
@@ -1441,14 +1514,31 @@ formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
namespace {
inline bool isHeaderInsertion(const tooling::Replacement &Replace) {
- return Replace.getOffset() == UINT_MAX &&
+ return Replace.getOffset() == UINT_MAX && Replace.getLength() == 0 &&
llvm::Regex(IncludeRegexPattern).match(Replace.getReplacementText());
}
-void skipComments(Lexer &Lex, Token &Tok) {
- while (Tok.is(tok::comment))
- if (Lex.LexFromRawLexer(Tok))
- return;
+inline bool isHeaderDeletion(const tooling::Replacement &Replace) {
+ return Replace.getOffset() == UINT_MAX && Replace.getLength() == 1;
+}
+
+// Returns the offset after skipping a sequence of tokens, matched by \p
+// GetOffsetAfterSequence, from the start of the code.
+// \p GetOffsetAfterSequence should be a function that matches a sequence of
+// tokens and returns an offset after the sequence.
+unsigned getOffsetAfterTokenSequence(
+ StringRef FileName, StringRef Code, const FormatStyle &Style,
+ std::function<unsigned(const SourceManager &, Lexer &, Token &)>
+ GetOffsetAfterSequence) {
+ std::unique_ptr<Environment> Env =
+ Environment::CreateVirtualEnvironment(Code, FileName, /*Ranges=*/{});
+ const SourceManager &SourceMgr = Env->getSourceManager();
+ Lexer Lex(Env->getFileID(), SourceMgr.getBuffer(Env->getFileID()), SourceMgr,
+ getFormattingLangOpts(Style));
+ Token Tok;
+ // Get the first token.
+ Lex.LexFromRawLexer(Tok);
+ return GetOffsetAfterSequence(SourceMgr, Lex, Tok);
}
// Check if a sequence of tokens is like "#<Name> <raw_identifier>". If it is,
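The getOffsetAfterTokenSequence refactoring above hoists the shared boilerplate (building a virtual environment, raw-lexing the first token) and leaves only the matching logic to the callback. A toy reduction of the same pattern to standard C++, with a character scanner standing in for the raw lexer; all names here are hypothetical:

    #include <functional>
    #include <iostream>
    #include <string>

    // Generic driver: sets up the scanning state, then delegates matching to
    // a callback that returns the offset reached after its token sequence.
    unsigned getOffsetAfterSequence(
        const std::string &Code,
        std::function<unsigned(const std::string &, size_t)> Matcher) {
      size_t First = Code.find_first_not_of(" \t\n"); // "lex" the first token
      return Matcher(Code, First == std::string::npos ? 0 : First);
    }

    int main() {
      // One concrete matcher: skip leading '#' directive lines.
      auto SkipDirectives = [](const std::string &Code, size_t Pos) -> unsigned {
        while (Pos < Code.size() && Code[Pos] == '#') {
          size_t EOL = Code.find('\n', Pos);
          if (EOL == std::string::npos)
            return Code.size();
          Pos = EOL + 1;
        }
        return Pos;
      };
      std::string Code = "#pragma once\n#include <x>\nint x;\n";
      std::cout << getOffsetAfterSequence(Code, SkipDirectives) << "\n"; // 26
    }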
@@ -1464,32 +1554,90 @@ bool checkAndConsumeDirectiveWithName(Lexer &Lex, StringRef Name, Token &Tok) {
return Matched;
}
+void skipComments(Lexer &Lex, Token &Tok) {
+ while (Tok.is(tok::comment))
+ if (Lex.LexFromRawLexer(Tok))
+ return;
+}
+
+// Returns the offset after header guard directives and any comments
+// before/after header guards. If no header guard is present in the code, this
+// returns the offset after skipping all comments from the start of the code.
unsigned getOffsetAfterHeaderGuardsAndComments(StringRef FileName,
StringRef Code,
const FormatStyle &Style) {
- std::unique_ptr<Environment> Env =
- Environment::CreateVirtualEnvironment(Code, FileName, /*Ranges=*/{});
- const SourceManager &SourceMgr = Env->getSourceManager();
- Lexer Lex(Env->getFileID(), SourceMgr.getBuffer(Env->getFileID()), SourceMgr,
- getFormattingLangOpts(Style));
- Token Tok;
- // Get the first token.
- Lex.LexFromRawLexer(Tok);
- skipComments(Lex, Tok);
- unsigned AfterComments = SourceMgr.getFileOffset(Tok.getLocation());
- if (checkAndConsumeDirectiveWithName(Lex, "ifndef", Tok)) {
- skipComments(Lex, Tok);
- if (checkAndConsumeDirectiveWithName(Lex, "define", Tok))
- return SourceMgr.getFileOffset(Tok.getLocation());
+ return getOffsetAfterTokenSequence(
+ FileName, Code, Style,
+ [](const SourceManager &SM, Lexer &Lex, Token Tok) {
+ skipComments(Lex, Tok);
+ unsigned InitialOffset = SM.getFileOffset(Tok.getLocation());
+ if (checkAndConsumeDirectiveWithName(Lex, "ifndef", Tok)) {
+ skipComments(Lex, Tok);
+ if (checkAndConsumeDirectiveWithName(Lex, "define", Tok))
+ return SM.getFileOffset(Tok.getLocation());
+ }
+ return InitialOffset;
+ });
+}
+
+// Check if a sequence of tokens is like
+// "#include ("header.h" | <header.h>)".
+// If it is, \p Tok will be the token after this directive; otherwise, it can be
+// any token after the given \p Tok (including \p Tok).
+bool checkAndConsumeInclusiveDirective(Lexer &Lex, Token &Tok) {
+ auto Matched = [&]() {
+ Lex.LexFromRawLexer(Tok);
+ return true;
+ };
+ if (Tok.is(tok::hash) && !Lex.LexFromRawLexer(Tok) &&
+ Tok.is(tok::raw_identifier) && Tok.getRawIdentifier() == "include") {
+ if (Lex.LexFromRawLexer(Tok))
+ return false;
+ if (Tok.is(tok::string_literal))
+ return Matched();
+ if (Tok.is(tok::less)) {
+ while (!Lex.LexFromRawLexer(Tok) && Tok.isNot(tok::greater)) {
+ }
+ if (Tok.is(tok::greater))
+ return Matched();
+ }
}
- return AfterComments;
+ return false;
+}
+
+// Returns the offset of the last #include directive after which a new
+// #include can be inserted. This ignores #include's after the #include block(s)
+// at the beginning of a file to avoid inserting headers into code sections
+// where new #include's should not be added by default.
+// These code sections include:
+// - raw string literals (containing #include).
+// - #if blocks.
+// - Special #include's among declarations (e.g. functions).
+//
+// If there is no #include after which a new #include can be inserted, this
+// returns the offset after skipping all comments from the start of the code.
+// Inserting after an #include is not allowed if it comes after code that is
+// not an #include (e.g. a preprocessing directive other than #include, or
+// declarations).
+unsigned getMaxHeaderInsertionOffset(StringRef FileName, StringRef Code,
+ const FormatStyle &Style) {
+ return getOffsetAfterTokenSequence(
+ FileName, Code, Style,
+ [](const SourceManager &SM, Lexer &Lex, Token Tok) {
+ skipComments(Lex, Tok);
+ unsigned MaxOffset = SM.getFileOffset(Tok.getLocation());
+ while (checkAndConsumeInclusiveDirective(Lex, Tok))
+ MaxOffset = SM.getFileOffset(Tok.getLocation());
+ return MaxOffset;
+ });
+}
+
+bool isDeletedHeader(llvm::StringRef HeaderName,
+ const std::set<llvm::StringRef> &HeadersToDelete) {
+ return HeadersToDelete.count(HeaderName) ||
+ HeadersToDelete.count(HeaderName.trim("\"<>"));
}
-// FIXME: we also need to insert a '\n' at the end of the code if we have an
-// insertion with offset Code.size(), and there is no '\n' at the end of the
-// code.
-// FIXME: do not insert headers into conditional #include blocks, e.g. #includes
-// surrounded by compile condition "#if...".
// FIXME: insert empty lines between newly created blocks.
tooling::Replacements
fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
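isDeletedHeader above checks the name both verbatim and with its delimiters stripped, so callers may register either "foo.h" or foo.h (or <foo.h>) for deletion. A standalone sketch of the same double lookup, with std::set and a hand-rolled trim in place of StringRef::trim:

    #include <iostream>
    #include <set>
    #include <string>

    // Strip surrounding quotes/angle brackets, mirroring StringRef::trim("\"<>").
    std::string trimDelimiters(const std::string &Name) {
      const std::string Delims = "\"<>";
      size_t B = Name.find_first_not_of(Delims);
      size_t E = Name.find_last_not_of(Delims);
      return B == std::string::npos ? "" : Name.substr(B, E - B + 1);
    }

    bool isDeletedHeader(const std::string &HeaderName,
                         const std::set<std::string> &HeadersToDelete) {
      return HeadersToDelete.count(HeaderName) ||
             HeadersToDelete.count(trimDelimiters(HeaderName));
    }

    int main() {
      std::set<std::string> ToDelete = {"foo.h"};
      std::cout << isDeletedHeader("\"foo.h\"", ToDelete) // 1: quoted form
                << isDeletedHeader("<foo.h>", ToDelete)   // 1: angled form
                << isDeletedHeader("\"bar.h\"", ToDelete) // 0
                << "\n";
    }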
@@ -1498,20 +1646,25 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
return Replaces;
tooling::Replacements HeaderInsertions;
+ std::set<llvm::StringRef> HeadersToDelete;
+ tooling::Replacements Result;
for (const auto &R : Replaces) {
- if (isHeaderInsertion(R))
- HeaderInsertions.insert(R);
- else if (R.getOffset() == UINT_MAX)
+ if (isHeaderInsertion(R)) {
+ // Replacements from \p Replaces must be conflict-free already, so we can
+ // simply consume the error.
+ llvm::consumeError(HeaderInsertions.add(R));
+ } else if (isHeaderDeletion(R)) {
+ HeadersToDelete.insert(R.getReplacementText());
+ } else if (R.getOffset() == UINT_MAX) {
llvm::errs() << "Insertions other than header #include insertion are "
"not supported! "
<< R.getReplacementText() << "\n";
+ } else {
+ llvm::consumeError(Result.add(R));
+ }
}
- if (HeaderInsertions.empty())
+ if (HeaderInsertions.empty() && HeadersToDelete.empty())
return Replaces;
- tooling::Replacements Result;
- std::set_difference(Replaces.begin(), Replaces.end(),
- HeaderInsertions.begin(), HeaderInsertions.end(),
- std::inserter(Result, Result.begin()));
llvm::Regex IncludeRegex(IncludeRegexPattern);
llvm::Regex DefineRegex(R"(^[\t\ ]*#[\t\ ]*define[\t\ ]*[^\\]*$)");
@@ -1532,6 +1685,10 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
unsigned MinInsertOffset =
getOffsetAfterHeaderGuardsAndComments(FileName, Code, Style);
StringRef TrimmedCode = Code.drop_front(MinInsertOffset);
+ // Max insertion offset in the original code.
+ unsigned MaxInsertOffset =
+ MinInsertOffset +
+ getMaxHeaderInsertionOffset(FileName, TrimmedCode, Style);
SmallVector<StringRef, 32> Lines;
TrimmedCode.split(Lines, '\n');
unsigned Offset = MinInsertOffset;
@@ -1540,13 +1697,30 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
for (auto Line : Lines) {
NextLineOffset = std::min(Code.size(), Offset + Line.size() + 1);
if (IncludeRegex.match(Line, &Matches)) {
+ // The header name with quotes or angle brackets.
StringRef IncludeName = Matches[2];
ExistingIncludes.insert(IncludeName);
- int Category = Categories.getIncludePriority(
- IncludeName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
- CategoryEndOffsets[Category] = NextLineOffset;
- if (FirstIncludeOffset < 0)
- FirstIncludeOffset = Offset;
+ // Only record the offset of the current #include if we can insert after it.
+ if (Offset <= MaxInsertOffset) {
+ int Category = Categories.getIncludePriority(
+ IncludeName, /*CheckMainHeader=*/FirstIncludeOffset < 0);
+ CategoryEndOffsets[Category] = NextLineOffset;
+ if (FirstIncludeOffset < 0)
+ FirstIncludeOffset = Offset;
+ }
+ if (isDeletedHeader(IncludeName, HeadersToDelete)) {
+ // If this is the last line and it has no trailing newline, make sure we
+ // don't delete across the file boundary.
+ unsigned Length = std::min(Line.size() + 1, Code.size() - Offset);
+ llvm::Error Err =
+ Result.add(tooling::Replacement(FileName, Offset, Length, ""));
+ if (Err) {
+ // Ignore the deletion on conflict.
+ llvm::errs() << "Failed to add header deletion replacement for "
+ << IncludeName << ": " << llvm::toString(std::move(Err))
+ << "\n";
+ }
+ }
}
Offset = NextLineOffset;
}
@@ -1570,6 +1744,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (CategoryEndOffsets.find(*I) == CategoryEndOffsets.end())
CategoryEndOffsets[*I] = CategoryEndOffsets[*std::prev(I)];
+ bool NeedNewLineAtEnd = !Code.empty() && Code.back() != '\n';
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
bool Matched = IncludeRegex.match(IncludeDirective, &Matches);
@@ -1588,7 +1763,20 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
std::string NewInclude = !IncludeDirective.endswith("\n")
? (IncludeDirective + "\n").str()
: IncludeDirective.str();
- Result.insert(tooling::Replacement(FileName, Offset, 0, NewInclude));
+ // When inserting headers at the end of the code, also append '\n' to the
+ // code if it does not end with '\n'.
+ if (NeedNewLineAtEnd && Offset == Code.size()) {
+ NewInclude = "\n" + NewInclude;
+ NeedNewLineAtEnd = false;
+ }
+ auto NewReplace = tooling::Replacement(FileName, Offset, 0, NewInclude);
+ auto Err = Result.add(NewReplace);
+ if (Err) {
+ llvm::consumeError(std::move(Err));
+ unsigned NewOffset = Result.getShiftedCodePosition(Offset);
+ NewReplace = tooling::Replacement(FileName, NewOffset, 0, NewInclude);
+ Result = Result.merge(tooling::Replacements(NewReplace));
+ }
}
return Result;
}
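The insertion fallback at the end of the hunk above deserves a closer look: when Replacements::add reports a conflict, the offset is re-mapped into the coordinate space produced by the already-recorded replacements via getShiftedCodePosition, and the shifted insertion is merged instead. A condensed sketch of that pattern, assuming the clang Tooling headers; the helper name is hypothetical:

    #include "clang/Tooling/Core/Replacement.h"
    #include "llvm/Support/Error.h"

    using clang::tooling::Replacement;
    using clang::tooling::Replacements;

    // Insert Text at Offset; if the plain add() conflicts with replacements
    // already recorded in Result, re-map the offset and merge instead.
    void addInsertion(Replacements &Result, llvm::StringRef FileName,
                      unsigned Offset, llvm::StringRef Text) {
      Replacement R(FileName, Offset, /*Length=*/0, Text);
      if (llvm::Error Err = Result.add(R)) {
        llvm::consumeError(std::move(Err));
        // Shift the offset into the coordinates the existing replacements
        // produce, then merge the shifted insertion on top of them.
        unsigned NewOffset = Result.getShiftedCodePosition(Offset);
        Result = Result.merge(Replacements(
            Replacement(FileName, NewOffset, /*Length=*/0, Text)));
      }
    }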
@@ -1611,18 +1799,6 @@ cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
return processReplacements(Cleanup, Code, NewReplaces, Style);
}
-tooling::Replacements reformat(const FormatStyle &Style, SourceManager &SM,
- FileID ID, ArrayRef<CharSourceRange> Ranges,
- bool *IncompleteFormat) {
- FormatStyle Expanded = expandPresets(Style);
- if (Expanded.DisableFormat)
- return tooling::Replacements();
-
- Environment Env(SM, ID, Ranges);
- Formatter Format(Env, Expanded, IncompleteFormat);
- return Format.process();
-}
-
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName, bool *IncompleteFormat) {
@@ -1630,19 +1806,28 @@ tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
if (Expanded.DisableFormat)
return tooling::Replacements();
- std::unique_ptr<Environment> Env =
- Environment::CreateVirtualEnvironment(Code, FileName, Ranges);
+ auto Env = Environment::CreateVirtualEnvironment(Code, FileName, Ranges);
+
+ if (Style.Language == FormatStyle::LK_JavaScript &&
+ Style.JavaScriptQuotes != FormatStyle::JSQS_Leave) {
+ JavaScriptRequoter Requoter(*Env, Expanded);
+ tooling::Replacements Requotes = Requoter.process();
+ if (!Requotes.empty()) {
+ auto NewCode = applyAllReplacements(Code, Requotes);
+ if (NewCode) {
+ auto NewEnv = Environment::CreateVirtualEnvironment(
+ *NewCode, FileName,
+ tooling::calculateRangesAfterReplacements(Requotes, Ranges));
+ Formatter Format(*NewEnv, Expanded, IncompleteFormat);
+ return Requotes.merge(Format.process());
+ }
+ }
+ }
+
Formatter Format(*Env, Expanded, IncompleteFormat);
return Format.process();
}
-tooling::Replacements cleanup(const FormatStyle &Style, SourceManager &SM,
- FileID ID, ArrayRef<CharSourceRange> Ranges) {
- Environment Env(SM, ID, Ranges);
- Cleaner Clean(Env, Style);
- return Clean.process();
-}
-
tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName) {
@@ -1684,6 +1869,8 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
return FormatStyle::LK_Java;
if (FileName.endswith_lower(".js") || FileName.endswith_lower(".ts"))
return FormatStyle::LK_JavaScript; // JavaScript or TypeScript.
+ if (FileName.endswith(".m") || FileName.endswith(".mm"))
+ return FormatStyle::LK_ObjC;
if (FileName.endswith_lower(".proto") ||
FileName.endswith_lower(".protodevel"))
return FormatStyle::LK_Proto;
@@ -1693,12 +1880,21 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
}
FormatStyle getStyle(StringRef StyleName, StringRef FileName,
- StringRef FallbackStyle, vfs::FileSystem *FS) {
+ StringRef FallbackStyle, StringRef Code,
+ vfs::FileSystem *FS) {
if (!FS) {
FS = vfs::getRealFileSystem().get();
}
FormatStyle Style = getLLVMStyle();
Style.Language = getLanguageByFileName(FileName);
+
+ // This is a very crude detection of whether a header contains ObjC code. It
+ // should be improved over time and probably be done on tokens, not on the
+ // bare content of the file.
+ if (Style.Language == FormatStyle::LK_Cpp && FileName.endswith(".h") &&
+ (Code.contains("\n- (") || Code.contains("\n+ (")))
+ Style.Language = FormatStyle::LK_ObjC;
+
if (!getPredefinedStyle(FallbackStyle, Style.Language, &Style)) {
llvm::errs() << "Invalid fallback style \"" << FallbackStyle
<< "\" using LLVM style\n";
@@ -1724,7 +1920,11 @@ FormatStyle getStyle(StringRef StyleName, StringRef FileName,
// Look for .clang-format/_clang-format file in the file's parent directories.
SmallString<128> UnsuitableConfigFiles;
SmallString<128> Path(FileName);
- llvm::sys::fs::make_absolute(Path);
+ if (std::error_code EC = FS->makeAbsolute(Path)) {
+ llvm::errs() << EC.message() << "\n";
+ return Style;
+ }
+
for (StringRef Directory = Path; !Directory.empty();
Directory = llvm::sys::path::parent_path(Directory)) {
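The language detection added to getStyle is deliberately cheap: a .h file is re-classified as Objective-C when its raw text contains a line that begins an ObjC method declarator. A toy reproduction of the heuristic in standard C++ (hypothetical name, std::string in place of StringRef); note that, like the patch, it requires a preceding newline and so would miss a method on the very first line:

    #include <iostream>
    #include <string>

    // Crude check mirroring the getStyle heuristic: does any line begin with
    // an Objective-C method declarator "- (" or "+ ("?
    bool looksLikeObjCHeader(const std::string &Code) {
      return Code.find("\n- (") != std::string::npos ||
             Code.find("\n+ (") != std::string::npos;
    }

    int main() {
      std::cout << looksLikeObjCHeader("struct S;\n")                  // 0
                << looksLikeObjCHeader("@interface I\n- (void)run;\n") // 1
                << "\n";
    }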
diff --git a/lib/Format/FormatToken.cpp b/lib/Format/FormatToken.cpp
index 2ae4ddcfd08a..ba5bf03a6346 100644
--- a/lib/Format/FormatToken.cpp
+++ b/lib/Format/FormatToken.cpp
@@ -13,9 +13,8 @@
///
//===----------------------------------------------------------------------===//
-#include "ContinuationIndenter.h"
#include "FormatToken.h"
-#include "clang/Format/Format.h"
+#include "ContinuationIndenter.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Debug.h"
#include <climits>
@@ -78,6 +77,9 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
if (State.NextToken == nullptr || !State.NextToken->Previous)
return 0;
+ if (Formats.size() == 1)
+ return 0; // Handled by formatFromToken
+
// Ensure that we start on the opening brace.
const FormatToken *LBrace =
State.NextToken->Previous->getPreviousNonComment();
@@ -93,6 +95,7 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
// Find the best ColumnFormat, i.e. the best number of columns to use.
const ColumnFormat *Format = getColumnFormat(RemainingCodePoints);
+
// If no ColumnFormat can be used, the braced list would generally be
// bin-packed. Add a severe penalty to this so that column layouts are
// preferred if possible.
@@ -130,7 +133,9 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
unsigned CommaSeparatedList::formatFromToken(LineState &State,
ContinuationIndenter *Indenter,
bool DryRun) {
- if (HasNestedBracedList)
+ // Formatting with 1 Column isn't really a column layout, so we don't need the
+ // special logic here. We can just avoid bin packing any of the parameters.
+ if (Formats.size() == 1 || HasNestedBracedList)
State.Stack.back().AvoidBinPacking = true;
return 0;
}
@@ -274,7 +279,7 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
continue;
// Ignore layouts that are bound to violate the column limit.
- if (Format.TotalWidth > Style.ColumnLimit)
+ if (Format.TotalWidth > Style.ColumnLimit && Columns > 1)
continue;
Formats.push_back(Format);
@@ -288,7 +293,7 @@ CommaSeparatedList::getColumnFormat(unsigned RemainingCharacters) const {
I = Formats.rbegin(),
E = Formats.rend();
I != E; ++I) {
- if (I->TotalWidth <= RemainingCharacters) {
+ if (I->TotalWidth <= RemainingCharacters || I->Columns == 1) {
if (BestFormat && I->LineCount > BestFormat->LineCount)
break;
BestFormat = &*I;
diff --git a/lib/Format/FormatToken.h b/lib/Format/FormatToken.h
index 43b162513620..ea3bbe368d5b 100644
--- a/lib/Format/FormatToken.h
+++ b/lib/Format/FormatToken.h
@@ -396,6 +396,21 @@ struct FormatToken {
}
}
+ /// \brief Returns \c true if this is a string literal that's like a label,
+ /// e.g. ends with "=" or ":".
+ bool isLabelString() const {
+ if (!is(tok::string_literal))
+ return false;
+ StringRef Content = TokenText;
+ if (Content.startswith("\"") || Content.startswith("'"))
+ Content = Content.drop_front(1);
+ if (Content.endswith("\"") || Content.endswith("'"))
+ Content = Content.drop_back(1);
+ Content = Content.trim();
+ return Content.size() > 1 &&
+ (Content.back() == ':' || Content.back() == '=');
+ }
+
/// \brief Returns actual token start location without leading escaped
/// newlines and whitespace.
///
@@ -580,12 +595,14 @@ struct AdditionalKeywords {
kw_as = &IdentTable.get("as");
kw_async = &IdentTable.get("async");
kw_await = &IdentTable.get("await");
+ kw_declare = &IdentTable.get("declare");
kw_finally = &IdentTable.get("finally");
kw_from = &IdentTable.get("from");
kw_function = &IdentTable.get("function");
kw_import = &IdentTable.get("import");
kw_is = &IdentTable.get("is");
kw_let = &IdentTable.get("let");
+ kw_module = &IdentTable.get("module");
kw_type = &IdentTable.get("type");
kw_var = &IdentTable.get("var");
kw_yield = &IdentTable.get("yield");
@@ -632,12 +649,14 @@ struct AdditionalKeywords {
IdentifierInfo *kw_as;
IdentifierInfo *kw_async;
IdentifierInfo *kw_await;
+ IdentifierInfo *kw_declare;
IdentifierInfo *kw_finally;
IdentifierInfo *kw_from;
IdentifierInfo *kw_function;
IdentifierInfo *kw_import;
IdentifierInfo *kw_is;
IdentifierInfo *kw_let;
+ IdentifierInfo *kw_module;
IdentifierInfo *kw_type;
IdentifierInfo *kw_var;
IdentifierInfo *kw_yield;
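isLabelString above feeds the new splitPenalty rules later in this patch (penalties 45 and 25 around label-like literals): a literal such as "result = " or "value: " reads as a label, so breaking immediately after it is discouraged. A quick standalone check of which literals qualify, re-implemented over std::string with hypothetical names (the real method additionally requires the token to be a string literal):

    #include <iostream>
    #include <string>

    // Mirrors FormatToken::isLabelString's text test: strip one layer of
    // quotes, trim whitespace, and see whether the rest ends in ':' or '='.
    bool isLabelString(std::string Content) {
      if (!Content.empty() && (Content.front() == '"' || Content.front() == '\''))
        Content.erase(0, 1);
      if (!Content.empty() && (Content.back() == '"' || Content.back() == '\''))
        Content.pop_back();
      size_t B = Content.find_first_not_of(" \t");
      size_t E = Content.find_last_not_of(" \t");
      Content = B == std::string::npos ? "" : Content.substr(B, E - B + 1);
      return Content.size() > 1 &&
             (Content.back() == ':' || Content.back() == '=');
    }

    int main() {
      std::cout << isLabelString("\"result = \"") // 1: ends with '='
                << isLabelString("\"value: \"")   // 1: ends with ':'
                << isLabelString("\"hello\"")     // 0
                << isLabelString("\":\"")         // 0: too short after trim
                << "\n";
    }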
diff --git a/lib/Format/FormatTokenLexer.cpp b/lib/Format/FormatTokenLexer.cpp
index 9778f84732d6..46a32a917dd9 100644
--- a/lib/Format/FormatTokenLexer.cpp
+++ b/lib/Format/FormatTokenLexer.cpp
@@ -26,12 +26,11 @@ namespace format {
FormatTokenLexer::FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
const FormatStyle &Style,
encoding::Encoding Encoding)
- : FormatTok(nullptr), IsFirstToken(true), GreaterStashed(false),
- LessStashed(false), Column(0), TrailingWhitespace(0),
- SourceMgr(SourceMgr), ID(ID), Style(Style),
- IdentTable(getFormattingLangOpts(Style)), Keywords(IdentTable),
- Encoding(Encoding), FirstInLineIndex(0), FormattingDisabled(false),
- MacroBlockBeginRegex(Style.MacroBlockBegin),
+ : FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
+ Column(0), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
+ Style(Style), IdentTable(getFormattingLangOpts(Style)),
+ Keywords(IdentTable), Encoding(Encoding), FirstInLineIndex(0),
+ FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
MacroBlockEndRegex(Style.MacroBlockEnd) {
Lex.reset(new Lexer(ID, SourceMgr.getBuffer(ID), SourceMgr,
getFormattingLangOpts(Style)));
@@ -49,7 +48,7 @@ ArrayRef<FormatToken *> FormatTokenLexer::lex() {
Tokens.push_back(getNextToken());
if (Style.Language == FormatStyle::LK_JavaScript) {
tryParseJSRegexLiteral();
- tryParseTemplateString();
+ handleTemplateStrings();
}
tryMergePreviousTokens();
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
@@ -228,17 +227,44 @@ void FormatTokenLexer::tryParseJSRegexLiteral() {
resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
}
-void FormatTokenLexer::tryParseTemplateString() {
+void FormatTokenLexer::handleTemplateStrings() {
FormatToken *BacktickToken = Tokens.back();
- if (!BacktickToken->is(tok::unknown) || BacktickToken->TokenText != "`")
+
+ if (BacktickToken->is(tok::l_brace)) {
+ StateStack.push(LexerState::NORMAL);
return;
+ }
+ if (BacktickToken->is(tok::r_brace)) {
+ if (StateStack.size() == 1)
+ return;
+ StateStack.pop();
+ if (StateStack.top() != LexerState::TEMPLATE_STRING)
+ return;
+ // If back in TEMPLATE_STRING, fall through and continue parsing the
+ // template string.
+ } else if (BacktickToken->is(tok::unknown) &&
+ BacktickToken->TokenText == "`") {
+ StateStack.push(LexerState::TEMPLATE_STRING);
+ } else {
+ return; // Not actually a template string.
+ }
// 'Manually' lex ahead in the current file buffer.
const char *Offset = Lex->getBufferLocation();
const char *TmplBegin = Offset - BacktickToken->TokenText.size(); // at "`"
- for (; Offset != Lex->getBuffer().end() && *Offset != '`'; ++Offset) {
- if (*Offset == '\\')
+ for (; Offset != Lex->getBuffer().end(); ++Offset) {
+ if (Offset[0] == '`') {
+ StateStack.pop();
+ break;
+ }
+ if (Offset[0] == '\\') {
++Offset; // Skip the escaped character.
+ } else if (Offset + 1 < Lex->getBuffer().end() && Offset[0] == '$' &&
+ Offset[1] == '{') {
+ // '${' introduces an expression interpolation in the template string.
+ StateStack.push(LexerState::NORMAL);
+ ++Offset;
+ break;
+ }
}
StringRef LiteralText(TmplBegin, Offset - TmplBegin + 1);
@@ -262,7 +288,10 @@ void FormatTokenLexer::tryParseTemplateString() {
Style.TabWidth, Encoding);
}
- resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset + 1)));
+ SourceLocation loc = Offset < Lex->getBuffer().end()
+ ? Lex->getSourceLocation(Offset + 1)
+ : SourceMgr.getLocForEndOfFile(ID);
+ resetLexer(SourceMgr.getFileOffset(loc));
}
bool FormatTokenLexer::tryMerge_TMacro() {
@@ -384,12 +413,8 @@ FormatToken *FormatTokenLexer::getStashedToken() {
}
FormatToken *FormatTokenLexer::getNextToken() {
- if (GreaterStashed) {
- GreaterStashed = false;
- return getStashedToken();
- }
- if (LessStashed) {
- LessStashed = false;
+ if (StateStack.top() == LexerState::TOKEN_STASHED) {
+ StateStack.pop();
return getStashedToken();
}
@@ -500,11 +525,13 @@ FormatToken *FormatTokenLexer::getNextToken() {
} else if (FormatTok->Tok.is(tok::greatergreater)) {
FormatTok->Tok.setKind(tok::greater);
FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
- GreaterStashed = true;
+ ++Column;
+ StateStack.push(LexerState::TOKEN_STASHED);
} else if (FormatTok->Tok.is(tok::lessless)) {
FormatTok->Tok.setKind(tok::less);
FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
- LessStashed = true;
+ ++Column;
+ StateStack.push(LexerState::TOKEN_STASHED);
}
// Now FormatTok is the next non-whitespace token.
@@ -531,7 +558,8 @@ FormatToken *FormatTokenLexer::getNextToken() {
Column = FormatTok->LastLineColumnWidth;
}
- if (Style.Language == FormatStyle::LK_Cpp) {
+ if (Style.Language == FormatStyle::LK_Cpp ||
+ Style.Language == FormatStyle::LK_ObjC) {
if (!(Tokens.size() > 0 && Tokens.back()->Tok.getIdentifierInfo() &&
Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() ==
tok::pp_define) &&
diff --git a/lib/Format/FormatTokenLexer.h b/lib/Format/FormatTokenLexer.h
index fa8c8882574f..c47b0e725d36 100644
--- a/lib/Format/FormatTokenLexer.h
+++ b/lib/Format/FormatTokenLexer.h
@@ -23,9 +23,17 @@
#include "clang/Format/Format.h"
#include "llvm/Support/Regex.h"
+#include <stack>
+
namespace clang {
namespace format {
+enum LexerState {
+ NORMAL,
+ TEMPLATE_STRING,
+ TOKEN_STASHED,
+};
+
class FormatTokenLexer {
public:
FormatTokenLexer(const SourceManager &SourceMgr, FileID ID,
@@ -53,7 +61,16 @@ private:
// its text if successful.
void tryParseJSRegexLiteral();
- void tryParseTemplateString();
+ // Handles JavaScript template strings.
+ //
+ // JavaScript template strings use backticks ('`') as delimiters, and allow
+ // embedding expressions nested in ${expr-here}. Template strings can be
+ // nested recursively, i.e. expressions can contain template strings in turn.
+ //
+ // The code below parses starting from a backtick, up to a closing backtick or
+ // an opening ${. It also maintains a stack of lexing contexts to handle
+ // nested template parts by balancing curly braces.
+ void handleTemplateStrings();
bool tryMerge_TMacro();
@@ -65,7 +82,7 @@ private:
FormatToken *FormatTok;
bool IsFirstToken;
- bool GreaterStashed, LessStashed;
+ std::stack<LexerState> StateStack;
unsigned Column;
unsigned TrailingWhitespace;
std::unique_ptr<Lexer> Lex;
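The StateStack declared above is what lets the lexer resume a template string after a nested ${...} expression, which may itself contain braces or further backticks. A toy scanner showing the same push/pop discipline in standard C++; the names and the depth-reporting twist are hypothetical:

    #include <algorithm>
    #include <iostream>
    #include <stack>
    #include <string>

    enum class State { Normal, TemplateString };

    // Walk JS-like source and report how deep template-string nesting gets,
    // using the same stack discipline as FormatTokenLexer.
    int maxTemplateDepth(const std::string &Code) {
      std::stack<State> States;
      States.push(State::Normal);
      int Max = 0, Depth = 0;
      for (size_t I = 0; I < Code.size(); ++I) {
        char C = Code[I];
        if (C == '`') {
          if (States.top() == State::TemplateString) {
            States.pop(); // closing backtick ends the template string
            --Depth;
          } else {
            States.push(State::TemplateString); // opening backtick
            Max = std::max(Max, ++Depth);
          }
        } else if (C == '$' && I + 1 < Code.size() && Code[I + 1] == '{' &&
                   States.top() == State::TemplateString) {
          States.push(State::Normal); // ${ starts an embedded expression
          ++I;
        } else if (C == '}' && States.size() > 1 &&
                   States.top() == State::Normal) {
          States.pop(); // } returns to the enclosing template string
        }
      }
      return Max;
    }

    int main() {
      std::cout << maxTemplateDepth("var s = `a${`b${x}c`}d`;") << "\n"; // 2
    }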
diff --git a/lib/Format/SortJavaScriptImports.cpp b/lib/Format/SortJavaScriptImports.cpp
index 32d5d756a3f0..e73695ca8477 100644
--- a/lib/Format/SortJavaScriptImports.cpp
+++ b/lib/Format/SortJavaScriptImports.cpp
@@ -1,4 +1,4 @@
-//===--- SortJavaScriptImports.h - Sort ES6 Imports -------------*- C++ -*-===//
+//===--- SortJavaScriptImports.cpp - Sort ES6 Imports -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "SortJavaScriptImports.h"
-#include "SortJavaScriptImports.h"
#include "TokenAnalyzer.h"
#include "TokenAnnotator.h"
#include "clang/Basic/Diagnostic.h"
@@ -127,7 +126,8 @@ public:
tooling::Replacements
analyze(TokenAnnotator &Annotator,
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- FormatTokenLexer &Tokens, tooling::Replacements &Result) override {
+ FormatTokenLexer &Tokens) override {
+ tooling::Replacements Result;
AffectedRangeMgr.computeAffectedLines(AnnotatedLines.begin(),
AnnotatedLines.end());
@@ -192,9 +192,15 @@ public:
DEBUG(llvm::dbgs() << "Replacing imports:\n"
<< getSourceText(InsertionPoint) << "\nwith:\n"
<< ReferencesText << "\n");
- Result.insert(tooling::Replacement(
+ auto Err = Result.add(tooling::Replacement(
Env.getSourceManager(), CharSourceRange::getCharRange(InsertionPoint),
ReferencesText));
+ // FIXME: better error handling. For now, just print the error message and
+ // skip the replacement in the release version.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
return Result;
}
@@ -276,16 +282,9 @@ private:
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
SmallVector<JsModuleReference, 16> References;
SourceLocation Start;
- bool FoundLines = false;
AnnotatedLine *FirstNonImportLine = nullptr;
+ bool AnyImportAffected = false;
for (auto Line : AnnotatedLines) {
- if (!Line->Affected) {
- // Only sort the first contiguous block of affected lines.
- if (FoundLines)
- break;
- else
- continue;
- }
Current = Line->First;
LineEnd = Line->Last;
skipComments();
@@ -294,15 +293,20 @@ private:
// of the import that immediately follows them by using the previously
// set Start.
Start = Line->First->Tok.getLocation();
- if (!Current)
- continue; // Only comments on this line.
- FoundLines = true;
+ if (!Current) {
+ // Only comments on this line. Could be the first non-import line.
+ FirstNonImportLine = Line;
+ continue;
+ }
JsModuleReference Reference;
Reference.Range.setBegin(Start);
if (!parseModuleReference(Keywords, Reference)) {
- FirstNonImportLine = Line;
+ if (!FirstNonImportLine)
+ FirstNonImportLine = Line; // if no comment before.
break;
}
+ FirstNonImportLine = nullptr;
+ AnyImportAffected = AnyImportAffected || Line->Affected;
Reference.Range.setEnd(LineEnd->Tok.getEndLoc());
DEBUG({
llvm::dbgs() << "JsModuleReference: {"
@@ -319,6 +323,9 @@ private:
References.push_back(Reference);
Start = SourceLocation();
}
+ // Only sort the imports if at least one import line was affected.
+ if (!AnyImportAffected)
+ References.clear();
return std::make_pair(References, FirstNonImportLine);
}
@@ -342,7 +349,6 @@ private:
if (!parseModuleBindings(Keywords, Reference))
return false;
- nextToken();
if (Current->is(Keywords.kw_from)) {
// imports have a 'from' clause, exports might not.
@@ -385,19 +391,28 @@ private:
if (Current->isNot(tok::identifier))
return false;
Reference.Prefix = Current->TokenText;
+ nextToken();
return true;
}
bool parseNamedBindings(const AdditionalKeywords &Keywords,
JsModuleReference &Reference) {
+ if (Current->is(tok::identifier)) {
+ nextToken();
+ if (Current->is(Keywords.kw_from))
+ return true;
+ if (Current->isNot(tok::comma))
+ return false;
+ nextToken(); // eat comma.
+ }
if (Current->isNot(tok::l_brace))
return false;
// {sym as alias, sym2 as ...} from '...';
- nextToken();
- while (true) {
+ while (Current->isNot(tok::r_brace)) {
+ nextToken();
if (Current->is(tok::r_brace))
- return true;
+ break;
if (Current->isNot(tok::identifier))
return false;
@@ -418,12 +433,11 @@ private:
Symbol.Range.setEnd(Current->Tok.getLocation());
Reference.Symbols.push_back(Symbol);
- if (Current->is(tok::r_brace))
- return true;
- if (Current->isNot(tok::comma))
+ if (!Current->isOneOf(tok::r_brace, tok::comma))
return false;
- nextToken();
}
+ nextToken(); // consume r_brace
+ return true;
}
};
diff --git a/lib/Format/TokenAnalyzer.cpp b/lib/Format/TokenAnalyzer.cpp
index 89ac35f3e842..f2e4e8ef0819 100644
--- a/lib/Format/TokenAnalyzer.cpp
+++ b/lib/Format/TokenAnalyzer.cpp
@@ -107,12 +107,12 @@ tooling::Replacements TokenAnalyzer::process() {
}
tooling::Replacements RunResult =
- analyze(Annotator, AnnotatedLines, Tokens, Result);
+ analyze(Annotator, AnnotatedLines, Tokens);
DEBUG({
llvm::dbgs() << "Replacements for run " << Run << ":\n";
- for (tooling::Replacements::iterator I = RunResult.begin(),
- E = RunResult.end();
+ for (tooling::Replacements::const_iterator I = RunResult.begin(),
+ E = RunResult.end();
I != E; ++I) {
llvm::dbgs() << I->toString() << "\n";
}
@@ -120,7 +120,15 @@ tooling::Replacements TokenAnalyzer::process() {
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
delete AnnotatedLines[i];
}
- Result.insert(RunResult.begin(), RunResult.end());
+ for (const auto &R : RunResult) {
+ auto Err = Result.add(R);
+ // FIXME: better error handling here. For now, simply return an empty
+ // Replacements to indicate failure.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ return tooling::Replacements();
+ }
+ }
}
return Result;
}
diff --git a/lib/Format/TokenAnalyzer.h b/lib/Format/TokenAnalyzer.h
index c1aa9c594fc3..78a3d1bc8d9e 100644
--- a/lib/Format/TokenAnalyzer.h
+++ b/lib/Format/TokenAnalyzer.h
@@ -31,8 +31,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
-#define DEBUG_TYPE "format-formatter"
-
namespace clang {
namespace format {
@@ -57,15 +55,12 @@ public:
FileID getFileID() const { return ID; }
- StringRef getFileName() const { return FileName; }
-
ArrayRef<CharSourceRange> getCharRanges() const { return CharRanges; }
const SourceManager &getSourceManager() const { return SM; }
private:
FileID ID;
- StringRef FileName;
SmallVector<CharSourceRange, 8> CharRanges;
SourceManager &SM;
@@ -87,7 +82,7 @@ protected:
virtual tooling::Replacements
analyze(TokenAnnotator &Annotator,
SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- FormatTokenLexer &Tokens, tooling::Replacements &Result) = 0;
+ FormatTokenLexer &Tokens) = 0;
void consumeUnwrappedLine(const UnwrappedLine &TheLine) override;
diff --git a/lib/Format/TokenAnnotator.cpp b/lib/Format/TokenAnnotator.cpp
index 4a90522e6e31..cf6373f45657 100644
--- a/lib/Format/TokenAnnotator.cpp
+++ b/lib/Format/TokenAnnotator.cpp
@@ -273,8 +273,9 @@ private:
!CurrentToken->Next->HasUnescapedNewline &&
!CurrentToken->Next->isTrailingComment())
HasMultipleParametersOnALine = true;
- if (CurrentToken->isOneOf(tok::kw_const, tok::kw_auto) ||
- CurrentToken->isSimpleTypeSpecifier())
+ if ((CurrentToken->Previous->isOneOf(tok::kw_const, tok::kw_auto) ||
+ CurrentToken->Previous->isSimpleTypeSpecifier()) &&
+ !CurrentToken->is(tok::l_brace))
Contexts.back().IsExpression = false;
if (CurrentToken->isOneOf(tok::semi, tok::colon))
MightBeObjCForRangeLoop = false;
@@ -305,8 +306,19 @@ private:
FormatToken *Left = CurrentToken->Previous;
Left->ParentBracket = Contexts.back().ContextKind;
FormatToken *Parent = Left->getPreviousNonComment();
+
+ // Cases where '>' is followed by '['.
+ // In C++, this can happen either in array of templates (foo<int>[10])
+ // or when array is a nested template type (unique_ptr<type1<type2>[]>).
+ bool CppArrayTemplates =
+ Style.Language == FormatStyle::LK_Cpp && Parent &&
+ Parent->is(TT_TemplateCloser) &&
+ (Contexts.back().CanBeExpression || Contexts.back().IsExpression ||
+ Contexts.back().InTemplateArgument);
+
bool StartsObjCMethodExpr =
- Style.Language == FormatStyle::LK_Cpp &&
+ !CppArrayTemplates && (Style.Language == FormatStyle::LK_Cpp ||
+ Style.Language == FormatStyle::LK_ObjC) &&
Contexts.back().CanBeExpression && Left->isNot(TT_LambdaLSquare) &&
CurrentToken->isNot(tok::l_brace) &&
(!Parent ||
@@ -326,7 +338,7 @@ private:
Parent->isOneOf(tok::l_brace, tok::comma)) {
Left->Type = TT_JsComputedPropertyName;
} else if (Style.Language == FormatStyle::LK_Proto ||
- (Parent &&
+ (!CppArrayTemplates && Parent &&
Parent->isOneOf(TT_BinaryOperator, TT_TemplateCloser, tok::at,
tok::comma, tok::l_paren, tok::l_square,
tok::question, tok::colon, tok::kw_return,
@@ -422,7 +434,8 @@ private:
FormatToken *Previous = CurrentToken->getPreviousNonComment();
if (((CurrentToken->is(tok::colon) &&
(!Contexts.back().ColonIsDictLiteral ||
- Style.Language != FormatStyle::LK_Cpp)) ||
+ (Style.Language != FormatStyle::LK_Cpp &&
+ Style.Language != FormatStyle::LK_ObjC))) ||
Style.Language == FormatStyle::LK_Proto) &&
(Previous->Tok.getIdentifierInfo() ||
Previous->is(tok::string_literal)))
@@ -431,6 +444,9 @@ private:
Style.Language == FormatStyle::LK_JavaScript)
Left->Type = TT_DictLiteral;
}
+ if (CurrentToken->is(tok::comma) &&
+ Style.Language == FormatStyle::LK_JavaScript)
+ Left->Type = TT_DictLiteral;
if (!consumeToken())
return false;
}
@@ -508,19 +524,29 @@ private:
} else if (Contexts.back().ColonIsObjCMethodExpr ||
Line.startsWith(TT_ObjCMethodSpecifier)) {
Tok->Type = TT_ObjCMethodExpr;
- Tok->Previous->Type = TT_SelectorName;
- if (Tok->Previous->ColumnWidth >
- Contexts.back().LongestObjCSelectorName)
- Contexts.back().LongestObjCSelectorName = Tok->Previous->ColumnWidth;
- if (!Contexts.back().FirstObjCSelectorName)
- Contexts.back().FirstObjCSelectorName = Tok->Previous;
+ const FormatToken *BeforePrevious = Tok->Previous->Previous;
+ if (!BeforePrevious ||
+ !(BeforePrevious->is(TT_CastRParen) ||
+ (BeforePrevious->is(TT_ObjCMethodExpr) &&
+ BeforePrevious->is(tok::colon))) ||
+ BeforePrevious->is(tok::r_square) ||
+ Contexts.back().LongestObjCSelectorName == 0) {
+ Tok->Previous->Type = TT_SelectorName;
+ if (Tok->Previous->ColumnWidth >
+ Contexts.back().LongestObjCSelectorName)
+ Contexts.back().LongestObjCSelectorName =
+ Tok->Previous->ColumnWidth;
+ if (!Contexts.back().FirstObjCSelectorName)
+ Contexts.back().FirstObjCSelectorName = Tok->Previous;
+ }
} else if (Contexts.back().ColonIsForRangeExpr) {
Tok->Type = TT_RangeBasedForLoopColon;
} else if (CurrentToken && CurrentToken->is(tok::numeric_constant)) {
Tok->Type = TT_BitFieldColon;
} else if (Contexts.size() == 1 &&
!Line.First->isOneOf(tok::kw_enum, tok::kw_case)) {
- if (Tok->Previous->isOneOf(tok::r_paren, tok::kw_noexcept))
+ if (Tok->getPreviousNonComment()->isOneOf(tok::r_paren,
+ tok::kw_noexcept))
Tok->Type = TT_CtorInitializerColon;
else
Tok->Type = TT_InheritanceColon;
@@ -858,7 +884,8 @@ private:
if (!CurrentToken->isOneOf(TT_LambdaLSquare, TT_ForEachMacro,
TT_FunctionLBrace, TT_ImplicitStringLiteral,
TT_InlineASMBrace, TT_JsFatArrow, TT_LambdaArrow,
- TT_RegexLiteral))
+ TT_OverloadedOperator, TT_RegexLiteral,
+ TT_TemplateString))
CurrentToken->Type = TT_Unknown;
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
@@ -1037,12 +1064,17 @@ private:
!Current.Next->isBinaryOperator() &&
!Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace,
tok::period, tok::arrow, tok::coloncolon))
- if (FormatToken *BeforeParen = Current.MatchingParen->Previous)
- if (BeforeParen->is(tok::identifier) &&
- BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
- (!BeforeParen->Previous ||
- BeforeParen->Previous->ClosesTemplateDeclaration))
- Current.Type = TT_FunctionAnnotationRParen;
+ if (FormatToken *AfterParen = Current.MatchingParen->Next) {
+ // Make sure this isn't the return type of an Obj-C block declaration
+ if (AfterParen->Tok.isNot(tok::caret)) {
+ if (FormatToken *BeforeParen = Current.MatchingParen->Previous)
+ if (BeforeParen->is(tok::identifier) &&
+ BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
+ (!BeforeParen->Previous ||
+ BeforeParen->Previous->ClosesTemplateDeclaration))
+ Current.Type = TT_FunctionAnnotationRParen;
+ }
+ }
} else if (Current.is(tok::at) && Current.Next) {
if (Current.Next->isStringLiteral()) {
Current.Type = TT_ObjCStringLiteral;
@@ -1144,6 +1176,7 @@ private:
bool rParenEndsCast(const FormatToken &Tok) {
// C-style casts are only used in C++ and Java.
if (Style.Language != FormatStyle::LK_Cpp &&
+ Style.Language != FormatStyle::LK_ObjC &&
Style.Language != FormatStyle::LK_Java)
return false;
@@ -1206,6 +1239,13 @@ private:
if (!LeftOfParens)
return false;
+ // Certain token types inside the parentheses mean that this can't be a
+ // cast.
+ for (const FormatToken *Token = Tok.MatchingParen->Next; Token != &Tok;
+ Token = Token->Next)
+ if (Token->is(TT_BinaryOperator))
+ return false;
+
// If the following token is an identifier or 'this', this is a cast. All
// cases where this can be something else are handled above.
if (Tok.Next->isOneOf(tok::identifier, tok::kw_this))
@@ -1243,7 +1283,7 @@ private:
const FormatToken *NextToken = Tok.getNextNonComment();
if (!NextToken ||
- NextToken->isOneOf(tok::arrow, Keywords.kw_final,
+ NextToken->isOneOf(tok::arrow, Keywords.kw_final, tok::equal,
Keywords.kw_override) ||
(NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
return TT_PointerOrReference;
@@ -1303,7 +1343,13 @@ private:
TokenType determinePlusMinusCaretUsage(const FormatToken &Tok) {
const FormatToken *PrevToken = Tok.getPreviousNonComment();
- if (!PrevToken || PrevToken->is(TT_CastRParen))
+ if (!PrevToken)
+ return TT_UnaryOperator;
+
+ if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator) &&
+ !PrevToken->is(tok::exclaim))
+ // There aren't any trailing unary operators except for TypeScript's
+ // non-null operator (!). Thus, this must be a sequence of leading operators.
return TT_UnaryOperator;
// Use heuristics to recognize unary operators.
@@ -1560,6 +1606,13 @@ void TokenAnnotator::setCommentLineLevels(
}
}
+static unsigned maxNestingDepth(const AnnotatedLine &Line) {
+ unsigned Result = 0;
+ for (const auto* Tok = Line.First; Tok != nullptr; Tok = Tok->Next)
+ Result = std::max(Result, Tok->NestingLevel);
+ return Result;
+}
+
void TokenAnnotator::annotate(AnnotatedLine &Line) {
for (SmallVectorImpl<AnnotatedLine *>::iterator I = Line.Children.begin(),
E = Line.Children.end();
@@ -1568,6 +1621,14 @@ void TokenAnnotator::annotate(AnnotatedLine &Line) {
}
AnnotatingParser Parser(Style, Line, Keywords);
Line.Type = Parser.parseLine();
+
+ // With very deep nesting, ExpressionParser uses lots of stack and the
+ // formatting algorithm is very slow. We're not going to do a good job here
+ // anyway - it's probably generated code being formatted by mistake.
+ // Just skip the whole line.
+ if (maxNestingDepth(Line) > 50)
+ Line.Type = LT_Invalid;
+
if (Line.Type == LT_Invalid)
return;
@@ -1816,10 +1877,12 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 100;
if (Left.is(TT_JsTypeColon))
return 35;
+ if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
+ (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
+ return 100;
}
- if (Left.is(tok::comma) || (Right.is(tok::identifier) && Right.Next &&
- Right.Next->is(TT_DictLiteral)))
+ if (Right.is(tok::identifier) && Right.Next && Right.Next->is(TT_DictLiteral))
return 1;
if (Right.is(tok::l_square)) {
if (Style.Language == FormatStyle::LK_Proto)
@@ -1935,20 +1998,24 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.is(TT_JavaAnnotation))
return 50;
+ if (Left.isOneOf(tok::plus, tok::comma) && Left.Previous &&
+ Left.Previous->isLabelString() &&
+ (Left.NextOperator || Left.OperatorIndex != 0))
+ return 45;
+ if (Right.is(tok::plus) && Left.isLabelString() &&
+ (Right.NextOperator || Right.OperatorIndex != 0))
+ return 25;
+ if (Left.is(tok::comma))
+ return 1;
+ if (Right.is(tok::lessless) && Left.isLabelString() &&
+ (Right.NextOperator || Right.OperatorIndex != 1))
+ return 25;
if (Right.is(tok::lessless)) {
- if (Left.is(tok::string_literal) &&
- (Right.NextOperator || Right.OperatorIndex != 1)) {
- StringRef Content = Left.TokenText;
- if (Content.startswith("\""))
- Content = Content.drop_front(1);
- if (Content.endswith("\""))
- Content = Content.drop_back(1);
- Content = Content.trim();
- if (Content.size() > 1 &&
- (Content.back() == ':' || Content.back() == '='))
- return 25;
- }
- return 1; // Breaking at a << is really cheap.
+ // Breaking at a << is really cheap.
+ if (!Left.is(tok::r_paren) || Right.OperatorIndex > 0)
+ // Slightly prefer to break before the first one in log-like statements.
+ return 2;
+ return 1;
}
if (Left.is(TT_ConditionalExpr))
return prec::Conditional;
@@ -1984,9 +2051,10 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.isOneOf(tok::semi, tok::comma))
return false;
if (Right.is(tok::less) &&
- (Left.is(tok::kw_template) ||
- (Line.Type == LT_ObjCDecl && Style.ObjCSpaceBeforeProtocolList)))
+ Line.Type == LT_ObjCDecl && Style.ObjCSpaceBeforeProtocolList)
return true;
+ if (Right.is(tok::less) && Left.is(tok::kw_template))
+ return Style.SpaceAfterTemplateKeyword;
if (Left.isOneOf(tok::exclaim, tok::tilde))
return false;
if (Left.is(tok::at) &&
@@ -2011,7 +2079,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Left.Previous->is(tok::r_paren)) ||
(!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
(Style.PointerAlignment != FormatStyle::PAS_Left ||
- Line.IsMultiVariableDeclStmt)));
+ (Line.IsMultiVariableDeclStmt &&
+ (Left.NestingLevel == 0 ||
+ (Left.NestingLevel == 1 && Line.First->is(tok::kw_for)))))));
if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
(!Left.is(TT_PointerOrReference) ||
(Style.PointerAlignment != FormatStyle::PAS_Right &&
@@ -2113,13 +2183,31 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
} else if (Style.Language == FormatStyle::LK_JavaScript) {
if (Left.is(TT_JsFatArrow))
return true;
+ if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
+ (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
+ return false;
+ if (Left.is(tok::identifier) && Right.is(TT_TemplateString))
+ return false;
if (Right.is(tok::star) &&
Left.isOneOf(Keywords.kw_function, Keywords.kw_yield))
return false;
+ if (Right.isOneOf(tok::l_brace, tok::l_square) &&
+ Left.isOneOf(Keywords.kw_function, Keywords.kw_yield))
+ return true;
+ // JS methods can use some keywords as names (e.g. `delete()`).
+ if (Right.is(tok::l_paren) && Line.MustBeDeclaration &&
+ Left.Tok.getIdentifierInfo())
+ return false;
if (Left.isOneOf(Keywords.kw_let, Keywords.kw_var, Keywords.kw_in,
Keywords.kw_of, tok::kw_const) &&
(!Left.Previous || !Left.Previous->is(tok::period)))
return true;
+ if (Left.isOneOf(tok::kw_for, Keywords.kw_as) && Left.Previous &&
+ Left.Previous->is(tok::period) && Right.is(tok::l_paren))
+ return false;
+ if (Left.is(Keywords.kw_as) &&
+ Right.isOneOf(tok::l_square, tok::l_brace, tok::l_paren))
+ return true;
if (Left.is(tok::kw_default) && Left.Previous &&
Left.Previous->is(tok::kw_export))
return true;
@@ -2146,6 +2234,8 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
tok::r_square, tok::r_brace) ||
Left.Tok.isLiteral()))
return false;
+ if (Left.is(tok::exclaim) && Right.is(Keywords.kw_as))
+ return true; // "x! as string"
} else if (Style.Language == FormatStyle::LK_Java) {
if (Left.is(tok::r_square) && Right.is(tok::l_brace))
return true;
@@ -2369,7 +2459,12 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Keywords.kw_implements))
return true;
} else if (Style.Language == FormatStyle::LK_JavaScript) {
- if (Left.is(tok::kw_return))
+ const FormatToken *NonComment = Right.getPreviousNonComment();
+ if (Left.isOneOf(tok::kw_return, tok::kw_continue, tok::kw_break,
+ tok::kw_throw) ||
+ (NonComment &&
+ NonComment->isOneOf(tok::kw_return, tok::kw_continue, tok::kw_break,
+ tok::kw_throw)))
return false; // Otherwise a semicolon is inserted.
if (Left.is(TT_JsFatArrow) && Right.is(tok::l_brace))
return false;
@@ -2383,6 +2478,18 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None;
if (Right.is(Keywords.kw_as))
return false; // must not break before as in 'x as type' casts
+ if (Left.is(Keywords.kw_declare) &&
+ Right.isOneOf(Keywords.kw_module, tok::kw_namespace,
+ Keywords.kw_function, tok::kw_class, tok::kw_enum,
+ Keywords.kw_interface, Keywords.kw_type, Keywords.kw_var,
+ Keywords.kw_let, tok::kw_const))
+ // See grammar for 'declare' statements at:
+ // https://github.com/Microsoft/TypeScript/blob/master/doc/spec.md#A.10
+ return false;
+ if (Left.isOneOf(Keywords.kw_module, tok::kw_namespace) &&
+ Right.isOneOf(tok::identifier, tok::string_literal)) {
+ return false; // must not break in "module foo { ...}"
+ }
}
if (Left.is(tok::at))
@@ -2415,10 +2522,13 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return !Style.BreakBeforeTernaryOperators;
if (Right.is(TT_InheritanceColon))
return true;
+ if (Right.is(TT_ObjCMethodExpr) && !Right.is(tok::r_square) &&
+ Left.isNot(TT_SelectorName))
+ return true;
if (Right.is(tok::colon) &&
!Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
return false;
- if (Left.is(tok::colon) && (Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))
+ if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr))
return true;
if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next &&
Right.Next->is(TT_ObjCMethodExpr)))
@@ -2434,6 +2544,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return true;
if (Right.is(TT_RangeBasedForLoopColon))
return false;
+ if (Left.is(TT_TemplateCloser) && Right.is(TT_TemplateOpener))
+ return true;
if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator) ||
Left.is(tok::kw_operator))
return false;
@@ -2522,7 +2634,8 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
<< " FakeLParens=";
for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
llvm::errs() << Tok->FakeLParens[i] << "/";
- llvm::errs() << " FakeRParens=" << Tok->FakeRParens << "\n";
+ llvm::errs() << " FakeRParens=" << Tok->FakeRParens;
+ llvm::errs() << " Text='" << Tok->TokenText << "'\n";
if (!Tok->Next)
assert(Tok == Line.Last);
Tok = Tok->Next;
diff --git a/lib/Format/TokenAnnotator.h b/lib/Format/TokenAnnotator.h
index baa68ded9740..97daaf44ba99 100644
--- a/lib/Format/TokenAnnotator.h
+++ b/lib/Format/TokenAnnotator.h
@@ -18,7 +18,6 @@
#include "UnwrappedLineParser.h"
#include "clang/Format/Format.h"
-#include <string>
namespace clang {
class SourceManager;
diff --git a/lib/Format/UnwrappedLineFormatter.cpp b/lib/Format/UnwrappedLineFormatter.cpp
index 35035ea8afba..d7f1c4232d86 100644
--- a/lib/Format/UnwrappedLineFormatter.cpp
+++ b/lib/Format/UnwrappedLineFormatter.cpp
@@ -10,6 +10,7 @@
#include "UnwrappedLineFormatter.h"
#include "WhitespaceManager.h"
#include "llvm/Support/Debug.h"
+#include <queue>
#define DEBUG_TYPE "format-formatter"
@@ -150,7 +151,7 @@ public:
MergedLines = 0;
if (!DryRun)
for (unsigned i = 0; i < MergedLines; ++i)
- join(*Next[i], *Next[i + 1]);
+ join(*Next[0], *Next[i + 1]);
Next = Next + MergedLines + 1;
return Current;
}
diff --git a/lib/Format/UnwrappedLineFormatter.h b/lib/Format/UnwrappedLineFormatter.h
index 478617d6a88e..7bcead9d25e1 100644
--- a/lib/Format/UnwrappedLineFormatter.h
+++ b/lib/Format/UnwrappedLineFormatter.h
@@ -19,8 +19,6 @@
#include "ContinuationIndenter.h"
#include "clang/Format/Format.h"
#include <map>
-#include <queue>
-#include <string>
namespace clang {
namespace format {
diff --git a/lib/Format/UnwrappedLineParser.cpp b/lib/Format/UnwrappedLineParser.cpp
index 2fe72987bc7c..84e06d05c739 100644
--- a/lib/Format/UnwrappedLineParser.cpp
+++ b/lib/Format/UnwrappedLineParser.cpp
@@ -360,14 +360,15 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// BlockKind later if we parse a braced list (where all blocks
// inside are by default braced lists), or when we explicitly detect
// blocks (for example while parsing lambdas).
- //
- // We exclude + and - as they can be ObjC visibility modifiers.
ProbablyBracedList =
(Style.Language == FormatStyle::LK_JavaScript &&
- NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in)) ||
+ NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
+ Keywords.kw_as)) ||
NextTok->isOneOf(tok::comma, tok::period, tok::colon,
tok::r_paren, tok::r_square, tok::l_brace,
tok::l_square, tok::l_paren, tok::ellipsis) ||
+ (NextTok->is(tok::identifier) &&
+ !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace)) ||
(NextTok->is(tok::semi) &&
(!ExpectClassBody || LBraceStack.size() != 1)) ||
(NextTok->isBinaryOperator() && !NextIsObjCMethod);
@@ -668,19 +669,21 @@ static bool mustBeJSIdent(const AdditionalKeywords &Keywords,
// FIXME: This returns true for C/C++ keywords like 'struct'.
return FormatTok->is(tok::identifier) &&
(FormatTok->Tok.getIdentifierInfo() == nullptr ||
- !FormatTok->isOneOf(Keywords.kw_in, Keywords.kw_of, Keywords.kw_as,
- Keywords.kw_async, Keywords.kw_await,
- Keywords.kw_yield, Keywords.kw_finally,
- Keywords.kw_function, Keywords.kw_import,
- Keywords.kw_is, Keywords.kw_let, Keywords.kw_var,
- Keywords.kw_abstract, Keywords.kw_extends,
- Keywords.kw_implements, Keywords.kw_instanceof,
- Keywords.kw_interface, Keywords.kw_throws));
+ !FormatTok->isOneOf(
+ Keywords.kw_in, Keywords.kw_of, Keywords.kw_as, Keywords.kw_async,
+ Keywords.kw_await, Keywords.kw_yield, Keywords.kw_finally,
+ Keywords.kw_function, Keywords.kw_import, Keywords.kw_is,
+ Keywords.kw_let, Keywords.kw_var, tok::kw_const,
+ Keywords.kw_abstract, Keywords.kw_extends, Keywords.kw_implements,
+ Keywords.kw_instanceof, Keywords.kw_interface,
+ Keywords.kw_throws));
}
static bool mustBeJSIdentOrValue(const AdditionalKeywords &Keywords,
const FormatToken *FormatTok) {
- return FormatTok->Tok.isLiteral() || mustBeJSIdent(Keywords, FormatTok);
+ return FormatTok->Tok.isLiteral() ||
+ FormatTok->isOneOf(tok::kw_true, tok::kw_false) ||
+ mustBeJSIdent(Keywords, FormatTok);
}
// isJSDeclOrStmt returns true if |FormatTok| starts a declaration or statement
@@ -724,6 +727,8 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
return;
bool PreviousMustBeValue = mustBeJSIdentOrValue(Keywords, Previous);
+ bool PreviousStartsTemplateExpr =
+ Previous->is(TT_TemplateString) && Previous->TokenText.endswith("${");
if (PreviousMustBeValue && Line && Line->Tokens.size() > 1) {
// If the token before the previous one is an '@', the previous token is an
// annotation and can precede another identifier/value.
@@ -734,9 +739,12 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
if (Next->is(tok::exclaim) && PreviousMustBeValue)
addUnwrappedLine();
bool NextMustBeValue = mustBeJSIdentOrValue(Keywords, Next);
- if (NextMustBeValue && (PreviousMustBeValue ||
- Previous->isOneOf(tok::r_square, tok::r_paren,
- tok::plusplus, tok::minusminus)))
+ bool NextEndsTemplateExpr =
+ Next->is(TT_TemplateString) && Next->TokenText.startswith("}");
+ if (NextMustBeValue && !NextEndsTemplateExpr && !PreviousStartsTemplateExpr &&
+ (PreviousMustBeValue ||
+ Previous->isOneOf(tok::r_square, tok::r_paren, tok::plusplus,
+ tok::minusminus)))
addUnwrappedLine();
if (PreviousMustBeValue && isJSDeclOrStmt(Keywords, Next))
addUnwrappedLine();
@@ -906,8 +914,8 @@ void UnwrappedLineParser::parseStructuralElement() {
if (FormatTok->is(tok::colon)) {
nextToken();
addUnwrappedLine();
+ return;
}
- return;
}
// In all other cases, parse the declaration.
break;
@@ -1222,9 +1230,11 @@ void UnwrappedLineParser::tryToParseJSFunction() {
// Consume "function".
nextToken();
- // Consume * (generator function).
- if (FormatTok->is(tok::star))
+ // Consume * (generator function). Treat it like C++'s overloaded operators.
+ if (FormatTok->is(tok::star)) {
+ FormatTok->Type = TT_OverloadedOperator;
nextToken();
+ }
// Consume function name.
if (FormatTok->is(tok::identifier))
diff --git a/lib/Format/WhitespaceManager.cpp b/lib/Format/WhitespaceManager.cpp
index 9cdba9df10a9..b64506f39035 100644
--- a/lib/Format/WhitespaceManager.cpp
+++ b/lib/Format/WhitespaceManager.cpp
@@ -42,11 +42,6 @@ WhitespaceManager::Change::Change(
TokenLength(0), PreviousEndOfTokenColumn(0), EscapedNewlineColumn(0),
StartOfBlockComment(nullptr), IndentationOffset(0) {}
-void WhitespaceManager::reset() {
- Changes.clear();
- Replaces.clear();
-}
-
void WhitespaceManager::replaceWhitespace(FormatToken &Tok, unsigned Newlines,
unsigned IndentLevel, unsigned Spaces,
unsigned StartOfTokenColumn,
@@ -432,7 +427,7 @@ void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
}
assert(Shift >= 0);
Changes[i].Spaces += Shift;
- if (i + 1 != End)
+ if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
Changes[i].StartOfTokenColumn += Shift;
}
@@ -502,8 +497,14 @@ void WhitespaceManager::storeReplacement(SourceRange Range,
if (StringRef(SourceMgr.getCharacterData(Range.getBegin()),
WhitespaceLength) == Text)
return;
- Replaces.insert(tooling::Replacement(
+ auto Err = Replaces.add(tooling::Replacement(
SourceMgr, CharSourceRange::getCharRange(Range), Text));
+ // FIXME: better error handling. For now, just print an error message in the
+ // release version.
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
}
void WhitespaceManager::appendNewlineText(std::string &Text,
diff --git a/lib/Format/WhitespaceManager.h b/lib/Format/WhitespaceManager.h
index 3562347a0e60..f42e371830b3 100644
--- a/lib/Format/WhitespaceManager.h
+++ b/lib/Format/WhitespaceManager.h
@@ -41,9 +41,6 @@ public:
bool UseCRLF)
: SourceMgr(SourceMgr), Style(Style), UseCRLF(UseCRLF) {}
- /// \brief Prepares the \c WhitespaceManager for another run.
- void reset();
-
/// \brief Replaces the whitespace in front of \p Tok. Only call once for
/// each \c AnnotatedToken.
void replaceWhitespace(FormatToken &Tok, unsigned Newlines,
diff --git a/lib/Frontend/ASTConsumers.cpp b/lib/Frontend/ASTConsumers.cpp
index de72ea57e35b..bd2ee06d1653 100644
--- a/lib/Frontend/ASTConsumers.cpp
+++ b/lib/Frontend/ASTConsumers.cpp
@@ -19,7 +19,6 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Timer.h"
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index 76fd00a132b4..32ce966f798e 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -41,7 +41,6 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/MutexGuard.h"
-#include "llvm/Support/Path.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <atomic>
@@ -926,7 +925,7 @@ public:
PrecompilePreambleConsumer(ASTUnit &Unit, PrecompilePreambleAction *Action,
const Preprocessor &PP, StringRef isysroot,
std::unique_ptr<raw_ostream> Out)
- : PCHGenerator(PP, "", nullptr, isysroot, std::make_shared<PCHBuffer>(),
+ : PCHGenerator(PP, "", isysroot, std::make_shared<PCHBuffer>(),
ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>>(),
/*AllowASTWithErrors=*/true),
Unit(Unit), Hash(Unit.getCurrentTopLevelHashValue()), Action(Action),
@@ -1393,7 +1392,8 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
}
OverriddenFiles[Status.getUniqueID()] = PreambleFileHash::createForFile(
- Status.getSize(), Status.getLastModificationTime().toEpochTime());
+ Status.getSize(),
+ llvm::sys::toTimeT(Status.getLastModificationTime()));
}
for (const auto &RB : PreprocessorOpts.RemappedFileBuffers) {
@@ -1434,8 +1434,8 @@ ASTUnit::getMainBufferWithPrecompiledPreamble(
// The file was not remapped; check whether it has changed on disk.
if (Status.getSize() != uint64_t(F->second.Size) ||
- Status.getLastModificationTime().toEpochTime() !=
- uint64_t(F->second.ModTime))
+ llvm::sys::toTimeT(Status.getLastModificationTime()) !=
+ F->second.ModTime)
AnyFileChanged = true;
}
@@ -2806,6 +2806,7 @@ const FileEntry *ASTUnit::getPCHFile() {
switch (M.Kind) {
case serialization::MK_ImplicitModule:
case serialization::MK_ExplicitModule:
+ case serialization::MK_PrebuiltModule:
return true; // skip dependencies.
case serialization::MK_PCH:
Mod = &M;
@@ -2825,7 +2826,7 @@ const FileEntry *ASTUnit::getPCHFile() {
}
bool ASTUnit::isModuleFile() {
- return isMainFileAST() && ASTFileLangOpts.CompilingModule;
+ return isMainFileAST() && ASTFileLangOpts.isCompilingModule();
}
void ASTUnit::PreambleData::countLines() const {
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
index 15b0adab7c5e..72e8f68dc051 100644
--- a/lib/Frontend/CacheTokens.cpp
+++ b/lib/Frontend/CacheTokens.cpp
@@ -12,12 +12,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/Utils.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemStatCache.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/Utils.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
@@ -28,7 +28,6 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/raw_ostream.h"
// FIXME: put this somewhere else?
#ifndef S_ISDIR
@@ -59,23 +58,30 @@ public:
class PTHEntryKeyVariant {
- union { const FileEntry* FE; const char* Path; };
+ union {
+ const FileEntry *FE;
+ // FIXME: Use "StringRef Path;" when MSVC 2013 is dropped.
+ const char *PathPtr;
+ };
+ size_t PathSize;
enum { IsFE = 0x1, IsDE = 0x2, IsNoExist = 0x0 } Kind;
FileData *Data;
public:
PTHEntryKeyVariant(const FileEntry *fe) : FE(fe), Kind(IsFE), Data(nullptr) {}
- PTHEntryKeyVariant(FileData *Data, const char *path)
- : Path(path), Kind(IsDE), Data(new FileData(*Data)) {}
+ PTHEntryKeyVariant(FileData *Data, StringRef Path)
+ : PathPtr(Path.data()), PathSize(Path.size()), Kind(IsDE),
+ Data(new FileData(*Data)) {}
- explicit PTHEntryKeyVariant(const char *path)
- : Path(path), Kind(IsNoExist), Data(nullptr) {}
+ explicit PTHEntryKeyVariant(StringRef Path)
+ : PathPtr(Path.data()), PathSize(Path.size()), Kind(IsNoExist),
+ Data(nullptr) {}
bool isFile() const { return Kind == IsFE; }
StringRef getString() const {
- return Kind == IsFE ? FE->getName() : Path;
+ return Kind == IsFE ? FE->getName() : StringRef(PathPtr, PathSize);
}
unsigned getKind() const { return (unsigned) Kind; }
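
The union rewrite above works around MSVC 2013, which rejects class types such as StringRef inside a union: the key stores the raw (pointer, size) pair and rebuilds the StringRef on access. A minimal standalone sketch of the same trick:

    #include "llvm/ADT/StringRef.h"

    struct PathKey {
      union {
        const void *Other;   // stand-in for the FileEntry* alternative
        const char *PathPtr; // raw pieces of the would-be StringRef member
      };
      size_t PathSize;
      llvm::StringRef path() const { return llvm::StringRef(PathPtr, PathSize); }
    };
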
@@ -183,14 +189,14 @@ class PTHWriter {
typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;
- IDMap IM;
raw_pwrite_stream &Out;
Preprocessor& PP;
- uint32_t idcount;
+ IDMap IM;
+ std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
PTHMap PM;
CachedStrsTy CachedStrs;
+ uint32_t idcount;
Offset CurStrOffset;
- std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
/// Get the persistent id for the given IdentifierInfo*.
uint32_t ResolveID(const IdentifierInfo* II);
@@ -550,7 +556,7 @@ public:
StatListener(PTHMap &pm) : PM(pm) {}
~StatListener() override {}
- LookupResult getStat(const char *Path, FileData &Data, bool isFile,
+ LookupResult getStat(StringRef Path, FileData &Data, bool isFile,
std::unique_ptr<vfs::File> *F,
vfs::FileSystem &FS) override {
LookupResult Result = statChained(Path, Data, isFile, F, FS);
diff --git a/lib/Frontend/ChainedIncludesSource.cpp b/lib/Frontend/ChainedIncludesSource.cpp
index 3f126615b1eb..c5b77ee90e56 100644
--- a/lib/Frontend/ChainedIncludesSource.cpp
+++ b/lib/Frontend/ChainedIncludesSource.cpp
@@ -17,6 +17,7 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Parse/ParseAST.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Serialization/ASTReader.h"
@@ -160,7 +161,7 @@ IntrusiveRefCntPtr<ExternalSemaSource> clang::createChainedIncludesSource(
auto Buffer = std::make_shared<PCHBuffer>();
ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions;
auto consumer = llvm::make_unique<PCHGenerator>(
- Clang->getPreprocessor(), "-", nullptr, /*isysroot=*/"", Buffer,
+ Clang->getPreprocessor(), "-", /*isysroot=*/"", Buffer,
Extensions, /*AllowASTWithErrors=*/true);
Clang->getASTContext().setASTMutationListener(
consumer->GetASTMutationListener());
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 8b00a3d00879..ccddd14f0f34 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -29,6 +29,7 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/CodeCompleteConsumer.h"
#include "clang/Sema/Sema.h"
#include "clang/Serialization/ASTReader.h"
@@ -140,6 +141,66 @@ void CompilerInstance::setModuleDepCollector(
ModuleDepCollector = std::move(Collector);
}
+static void collectHeaderMaps(const HeaderSearch &HS,
+ std::shared_ptr<ModuleDependencyCollector> MDC) {
+ SmallVector<std::string, 4> HeaderMapFileNames;
+ HS.getHeaderMapFileNames(HeaderMapFileNames);
+ for (auto &Name : HeaderMapFileNames)
+ MDC->addFile(Name);
+}
+
+static void collectIncludePCH(CompilerInstance &CI,
+ std::shared_ptr<ModuleDependencyCollector> MDC) {
+ const PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ if (PPOpts.ImplicitPCHInclude.empty())
+ return;
+
+ StringRef PCHInclude = PPOpts.ImplicitPCHInclude;
+ FileManager &FileMgr = CI.getFileManager();
+ const DirectoryEntry *PCHDir = FileMgr.getDirectory(PCHInclude);
+ if (!PCHDir) {
+ MDC->addFile(PCHInclude);
+ return;
+ }
+
+ std::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(PCHDir->getName(), DirNative);
+ vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ SimpleASTReaderListener Validator(CI.getPreprocessor());
+ for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ // Check whether this is an AST file. ASTReader::isAcceptableASTFile is not
+ // used here since we're not interested in validating the PCH at this time;
+ // we only want to check whether this is a file containing an AST.
+ if (!ASTReader::readASTFileControlBlock(
+ Dir->getName(), FileMgr, CI.getPCHContainerReader(),
+ /*FindModuleFileExtensions=*/false, Validator,
+ /*ValidateDiagnosticOptions=*/false))
+ MDC->addFile(Dir->getName());
+ }
+}
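
collectIncludePCH scans the PCH directory through the compiler's virtual file system rather than the real one, so VFS-mapped files are visited too. A minimal sketch of that iteration pattern, assuming a vfs::FileSystem &FS and a native path DirNative:

    // Hedged sketch of VFS directory iteration as used above.
    std::error_code EC;
    for (vfs::directory_iterator I = FS.dir_begin(DirNative, EC), E;
         I != E && !EC; I.increment(EC))
      llvm::errs() << I->getName() << "\n"; // path of each directory entry
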
+
+static void collectVFSEntries(CompilerInstance &CI,
+ std::shared_ptr<ModuleDependencyCollector> MDC) {
+ if (CI.getHeaderSearchOpts().VFSOverlayFiles.empty())
+ return;
+
+ // Collect all VFS entries found in the overlay files.
+ SmallVector<vfs::YAMLVFSEntry, 16> VFSEntries;
+ for (const std::string &VFSFile : CI.getHeaderSearchOpts().VFSOverlayFiles) {
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
+ llvm::MemoryBuffer::getFile(VFSFile);
+ if (!Buffer)
+ return;
+ vfs::collectVFSFromYAML(std::move(Buffer.get()), /*DiagHandler*/ nullptr,
+ VFSFile, VFSEntries);
+ }
+
+ for (auto &E : VFSEntries)
+ MDC->addFile(E.VPath, E.RPath);
+}
+
// Diagnostics
static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
const CodeGenOptions *CodeGenOpts,
@@ -333,9 +394,16 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
InitializePreprocessor(*PP, PPOpts, getPCHContainerReader(),
getFrontendOpts());
- // Initialize the header search object.
+ // Initialize the header search object. In CUDA compilations, we use the aux
+ // triple (the host triple) to initialize our header search, since we need to
+ // find the host headers in order to compile the CUDA code.
+ const llvm::Triple *HeaderSearchTriple = &PP->getTargetInfo().getTriple();
+ if (PP->getTargetInfo().getTriple().getOS() == llvm::Triple::CUDA &&
+ PP->getAuxTargetInfo())
+ HeaderSearchTriple = &PP->getAuxTargetInfo()->getTriple();
+
ApplyHeaderSearchOptions(PP->getHeaderSearchInfo(), getHeaderSearchOpts(),
- PP->getLangOpts(), PP->getTargetInfo().getTriple());
+ PP->getLangOpts(), *HeaderSearchTriple);
PP->setPreprocessedOutput(getPreprocessorOutputOpts().ShowCPP);
@@ -358,8 +426,14 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
DepOpts.ModuleDependencyOutputDir);
}
- if (ModuleDepCollector)
+ // If there is a module dep collector, register it alongside the other
+ // dependency collectors and also collect header maps, any implicit include
+ // PCH, and VFS overlay file entries.
+ if (ModuleDepCollector) {
addDependencyCollector(ModuleDepCollector);
+ collectHeaderMaps(PP->getHeaderSearchInfo(), ModuleDepCollector);
+ collectIncludePCH(*this, ModuleDepCollector);
+ collectVFSEntries(*this, ModuleDepCollector);
+ }
for (auto &Listener : DependencyCollectors)
Listener->attachToPreprocessor(*PP);
@@ -514,9 +588,11 @@ void CompilerInstance::createCodeCompletionConsumer() {
}
void CompilerInstance::createFrontendTimer() {
- FrontendTimerGroup.reset(new llvm::TimerGroup("Clang front-end time report"));
+ FrontendTimerGroup.reset(
+ new llvm::TimerGroup("frontend", "Clang front-end time report"));
FrontendTimer.reset(
- new llvm::Timer("Clang front-end timer", *FrontendTimerGroup));
+ new llvm::Timer("frontend", "Clang front-end timer",
+ *FrontendTimerGroup));
}
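
The timer changes follow LLVM's switch to a two-part (name, description) API for Timer and TimerGroup: the short name keys machine-readable output while the description is for human-readable reports. A small sketch under that assumption:

    // Sketch of the (name, description) llvm::Timer API this diff adopts.
    llvm::TimerGroup Group("frontend", "Clang front-end time report");
    llvm::Timer T("frontend", "Clang front-end timer", Group);
    {
      llvm::TimeRegion Scoped(T); // accumulates time spent in this scope
    }
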
CodeCompleteConsumer *
@@ -537,6 +613,11 @@ void CompilerInstance::createSema(TranslationUnitKind TUKind,
CodeCompleteConsumer *CompletionConsumer) {
TheSema.reset(new Sema(getPreprocessor(), getASTContext(), getASTConsumer(),
TUKind, CompletionConsumer));
+ // Attach the external sema source if there is any.
+ if (ExternalSemaSrc) {
+ TheSema->addExternalSource(ExternalSemaSrc.get());
+ ExternalSemaSrc->InitializeSema(*TheSema);
+ }
}
// Output Files
@@ -841,6 +922,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
// created. This complexity should be lifted elsewhere.
getTarget().adjust(getLangOpts());
+ // Adjust target options based on codegen options.
+ getTarget().adjustTargetOptions(getCodeGenOpts(), getTargetOpts());
+
// rewriter project will change target built-in bool type from its default.
if (getFrontendOpts().ProgramAction == frontend::RewriteObjC)
getTarget().noSignedCharForObjCBool();
@@ -854,8 +938,8 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (getFrontendOpts().ShowTimers)
createFrontendTimer();
- if (getFrontendOpts().ShowStats)
- llvm::EnableStatistics();
+ if (getFrontendOpts().ShowStats || !getFrontendOpts().StatsFile.empty())
+ llvm::EnableStatistics(false);
for (const FrontendInputFile &FIF : getFrontendOpts().Inputs) {
// Reset the ID tables if we are reusing the SourceManager and parsing
@@ -888,9 +972,24 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
OS << " generated.\n";
}
- if (getFrontendOpts().ShowStats && hasFileManager()) {
- getFileManager().PrintStats();
- OS << "\n";
+ if (getFrontendOpts().ShowStats) {
+ if (hasFileManager()) {
+ getFileManager().PrintStats();
+ OS << '\n';
+ }
+ llvm::PrintStatistics(OS);
+ }
+ StringRef StatsFile = getFrontendOpts().StatsFile;
+ if (!StatsFile.empty()) {
+ std::error_code EC;
+ auto StatS = llvm::make_unique<llvm::raw_fd_ostream>(StatsFile, EC,
+ llvm::sys::fs::F_Text);
+ if (EC) {
+ getDiagnostics().Report(diag::warn_fe_unable_to_open_stats_file)
+ << StatsFile << EC.message();
+ } else {
+ llvm::PrintStatisticsJSON(*StatS);
+ }
}
return !getDiagnostics().getClient()->getNumErrors();
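
The new -stats-file support enables LLVM's statistics machinery even when textual -print-stats output is off, and dumps the counters as JSON at the end of the run. A minimal sketch of the underlying calls:

    // Hedged sketch: collect statistics without the exit-time printout, then
    // emit them as JSON to a stream of your choice.
    llvm::EnableStatistics(false);           // false: no automatic print at exit
    // ... do the work that bumps STATISTIC counters ...
    llvm::PrintStatisticsJSON(llvm::errs());
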
@@ -936,7 +1035,8 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance,
std::remove_if(PPOpts.Macros.begin(), PPOpts.Macros.end(),
[&HSOpts](const std::pair<std::string, bool> &def) {
StringRef MacroDef = def.first;
- return HSOpts.ModulesIgnoreMacros.count(MacroDef.split('=').first) > 0;
+ return HSOpts.ModulesIgnoreMacros.count(
+ llvm::CachedHashString(MacroDef.split('=').first)) > 0;
}),
PPOpts.Macros.end());
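
ModulesIgnoreMacros now holds llvm::CachedHashString, an owning string that caches its hash so repeated set lookups avoid rehashing. A small standalone sketch with illustrative names:

    #include "llvm/ADT/CachedHashString.h"
    #include "llvm/ADT/DenseSet.h"

    int main() {
      llvm::DenseSet<llvm::CachedHashString> Ignored;
      Ignored.insert(llvm::CachedHashString("NDEBUG"));
      return Ignored.count(llvm::CachedHashString("NDEBUG")) ? 0 : 1;
    }
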
@@ -1022,7 +1122,7 @@ static bool compileModuleImpl(CompilerInstance &ImportingInstance,
// Construct a module-generating action. Passing through the module map is
// safe because the FileManager is shared between the compiler instances.
- GenerateModuleAction CreateModuleAction(
+ GenerateModuleFromModuleMapAction CreateModuleAction(
ModMap.getModuleMapFileForUniquing(Module), Module->IsSystem);
ImportingInstance.getDiagnostics().Report(ImportLoc,
@@ -1292,7 +1392,8 @@ void CompilerInstance::createModuleManager() {
const PreprocessorOptions &PPOpts = getPreprocessorOpts();
std::unique_ptr<llvm::Timer> ReadTimer;
if (FrontendTimerGroup)
- ReadTimer = llvm::make_unique<llvm::Timer>("Reading modules",
+ ReadTimer = llvm::make_unique<llvm::Timer>("reading_modules",
+ "Reading modules",
*FrontendTimerGroup);
ModuleManager = new ASTReader(
getPreprocessor(), getASTContext(), getPCHContainerReader(),
@@ -1325,7 +1426,8 @@ void CompilerInstance::createModuleManager() {
bool CompilerInstance::loadModuleFile(StringRef FileName) {
llvm::Timer Timer;
if (FrontendTimerGroup)
- Timer.init("Preloading " + FileName.str(), *FrontendTimerGroup);
+ Timer.init("preloading." + FileName.str(), "Preloading " + FileName.str(),
+ *FrontendTimerGroup);
llvm::TimeRegion TimeLoading(FrontendTimerGroup ? &Timer : nullptr);
// Helper to recursively read the module names for all modules we're adding.
@@ -1357,8 +1459,21 @@ bool CompilerInstance::loadModuleFile(StringRef FileName) {
if (Module *M = CI.getPreprocessor()
.getHeaderSearchInfo()
.getModuleMap()
- .findModule(II->getName()))
+ .findModule(II->getName())) {
M->HasIncompatibleModuleFile = true;
+
+ // Mark module as available if the only reason it was unavailable
+ // was missing headers.
+ SmallVector<Module *, 2> Stack;
+ Stack.push_back(M);
+ while (!Stack.empty()) {
+ Module *Current = Stack.pop_back_val();
+ if (Current->IsMissingRequirement) continue;
+ Current->IsAvailable = true;
+ Stack.insert(Stack.end(),
+ Current->submodule_begin(), Current->submodule_end());
+ }
+ }
}
LoadedModules.clear();
}
@@ -1432,7 +1547,25 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
} else {
// Search for a module with the given name.
Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
- if (!Module) {
+ HeaderSearchOptions &HSOpts =
+ PP->getHeaderSearchInfo().getHeaderSearchOpts();
+
+ std::string ModuleFileName;
+ bool LoadFromPrebuiltModulePath = false;
+ // We try to load the module from the prebuilt module paths. If not
+ // successful, we then try to find it in the module cache.
+ if (!HSOpts.PrebuiltModulePaths.empty()) {
+ // Load the module from the prebuilt module path.
+ ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(
+ ModuleName, "", /*UsePrebuiltPath*/ true);
+ if (!ModuleFileName.empty())
+ LoadFromPrebuiltModulePath = true;
+ }
+ if (!LoadFromPrebuiltModulePath && Module) {
+ // Load the module from the module cache.
+ ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(Module);
+ } else if (!LoadFromPrebuiltModulePath) {
+ // We can't find a module; error out here.
getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
<< ModuleName
<< SourceRange(ImportLoc, ModuleNameLoc);
@@ -1440,13 +1573,11 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
return ModuleLoadResult();
}
- std::string ModuleFileName =
- PP->getHeaderSearchInfo().getModuleFileName(Module);
if (ModuleFileName.empty()) {
- if (Module->HasIncompatibleModuleFile) {
+ if (Module && Module->HasIncompatibleModuleFile) {
// We tried and failed to load a module file for this module. Fall
// back to textual inclusion for its headers.
- return ModuleLoadResult(nullptr, /*missingExpected*/true);
+ return ModuleLoadResult::ConfigMismatch;
}
getDiagnostics().Report(ModuleNameLoc, diag::err_module_build_disabled)
@@ -1461,19 +1592,50 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
llvm::Timer Timer;
if (FrontendTimerGroup)
- Timer.init("Loading " + ModuleFileName, *FrontendTimerGroup);
+ Timer.init("loading." + ModuleFileName, "Loading " + ModuleFileName,
+ *FrontendTimerGroup);
llvm::TimeRegion TimeLoading(FrontendTimerGroup ? &Timer : nullptr);
- // Try to load the module file.
- unsigned ARRFlags = ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
+ // Try to load the module file. If we are trying to load from the prebuilt
+ // module path, we don't have the module map files and don't know how to
+ // rebuild modules.
+ unsigned ARRFlags = LoadFromPrebuiltModulePath ?
+ ASTReader::ARR_ConfigurationMismatch :
+ ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing;
switch (ModuleManager->ReadAST(ModuleFileName,
+ LoadFromPrebuiltModulePath ?
+ serialization::MK_PrebuiltModule :
serialization::MK_ImplicitModule,
- ImportLoc, ARRFlags)) {
- case ASTReader::Success:
+ ImportLoc,
+ ARRFlags)) {
+ case ASTReader::Success: {
+ if (LoadFromPrebuiltModulePath && !Module) {
+ Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ if (!Module || !Module->getASTFile() ||
+ FileMgr->getFile(ModuleFileName) != Module->getASTFile()) {
+ // Error out if Module does not refer to the file in the prebuilt
+ // module path.
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_prebuilt)
+ << ModuleName;
+ ModuleBuildFailed = true;
+ KnownModules[Path[0].first] = nullptr;
+ return ModuleLoadResult();
+ }
+ }
break;
+ }
case ASTReader::OutOfDate:
case ASTReader::Missing: {
+ if (LoadFromPrebuiltModulePath) {
+ // We can't rebuild the module without a module map. Since ReadAST
+ // already produces diagnostics for these two cases, we simply
+ // error out here.
+ ModuleBuildFailed = true;
+ KnownModules[Path[0].first] = nullptr;
+ return ModuleLoadResult();
+ }
+
// The module file is missing or out-of-date. Build it.
assert(Module && "missing module file");
// Check whether there is a cycle in the module graph.
@@ -1524,8 +1686,13 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
break;
}
- case ASTReader::VersionMismatch:
case ASTReader::ConfigurationMismatch:
+ if (LoadFromPrebuiltModulePath)
+ getDiagnostics().Report(SourceLocation(),
+ diag::warn_module_config_mismatch)
+ << ModuleFileName;
+ // Fall through to error out.
+ case ASTReader::VersionMismatch:
case ASTReader::HadErrors:
ModuleLoader::HadFatalFailure = true;
// FIXME: The ASTReader will already have complained, but can we shoehorn
@@ -1617,7 +1784,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
<< Module->getFullModuleName()
<< SourceRange(Path.front().second, Path.back().second);
- return ModuleLoadResult(nullptr, true);
+ return ModuleLoadResult::MissingExpected;
}
// Check whether this module is available.
@@ -1651,7 +1818,7 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
}
LastModuleImportLoc = ImportLoc;
- LastModuleImportResult = ModuleLoadResult(Module, false);
+ LastModuleImportResult = ModuleLoadResult(Module);
return LastModuleImportResult;
}
@@ -1749,3 +1916,8 @@ CompilerInstance::lookupMissingImports(StringRef Name,
return false;
}
void CompilerInstance::resetAndLeakSema() { BuryPointer(takeSema()); }
+
+void CompilerInstance::setExternalSemaSource(
+ IntrusiveRefCntPtr<ExternalSemaSource> ESS) {
+ ExternalSemaSrc = std::move(ESS);
+}
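
The new setExternalSemaSource hook lets an embedder hand Sema an extra source before createSema runs, which then wires it up via addExternalSource and InitializeSema. A usage sketch; MyExternalSource is hypothetical, not a real clang class:

    // Hypothetical usage sketch of the new hook.
    class MyExternalSource : public clang::ExternalSemaSource {
      // ... override the lookup callbacks of interest ...
    };

    void attach(clang::CompilerInstance &CI) {
      CI.setExternalSemaSource(new MyExternalSource());
      // A later createSema() call now registers and initializes the source.
    }
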
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index c6948ebfc4b4..a0682e26e702 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -7,6 +7,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Frontend/CompilerInvocation.h"
#include "TestModuleFileExtension.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/FileManager.h"
@@ -15,11 +16,11 @@
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Util.h"
-#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/LangStandard.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearchOptions.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "llvm/ADT/Hashing.h"
@@ -97,6 +98,9 @@ static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
if (S == "s" || S == "z" || S.empty())
return 2;
+ if (S == "g")
+ return 1;
+
return getLastArgIntValue(Args, OPT_O, DefaultOpt, Diags);
}
@@ -237,6 +241,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
+ Opts.ShowEnabledCheckerList = Args.hasArg(OPT_analyzer_list_enabled_checkers);
Opts.DisableAllChecks = Args.hasArg(OPT_analyzer_disable_all_checks);
Opts.visualizeExplodedGraphWithGraphViz =
@@ -436,28 +441,37 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
}
Opts.OptimizationLevel = OptimizationLevel;
- // We must always run at least the always inlining pass.
- Opts.setInlining(
- (Opts.OptimizationLevel > 1) ? CodeGenOptions::NormalInlining
- : CodeGenOptions::OnlyAlwaysInlining);
- // -fno-inline-functions overrides OptimizationLevel > 1.
- Opts.NoInline = Args.hasArg(OPT_fno_inline);
- if (Arg* InlineArg = Args.getLastArg(options::OPT_finline_functions,
- options::OPT_finline_hint_functions,
- options::OPT_fno_inline_functions)) {
- const Option& InlineOpt = InlineArg->getOption();
- if (InlineOpt.matches(options::OPT_finline_functions))
- Opts.setInlining(CodeGenOptions::NormalInlining);
- else if (InlineOpt.matches(options::OPT_finline_hint_functions))
- Opts.setInlining(CodeGenOptions::OnlyHintInlining);
- else
- Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
+ // At O0 we want to fully disable inlining outside of cases marked with
+ // 'alwaysinline' that are required for correctness.
+ Opts.setInlining((Opts.OptimizationLevel == 0)
+ ? CodeGenOptions::OnlyAlwaysInlining
+ : CodeGenOptions::NormalInlining);
+ // Explicit inlining flags can disable some or all inlining even at
+ // optimization levels above zero.
+ if (Arg *InlineArg = Args.getLastArg(
+ options::OPT_finline_functions, options::OPT_finline_hint_functions,
+ options::OPT_fno_inline_functions, options::OPT_fno_inline)) {
+ if (Opts.OptimizationLevel > 0) {
+ const Option &InlineOpt = InlineArg->getOption();
+ if (InlineOpt.matches(options::OPT_finline_functions))
+ Opts.setInlining(CodeGenOptions::NormalInlining);
+ else if (InlineOpt.matches(options::OPT_finline_hint_functions))
+ Opts.setInlining(CodeGenOptions::OnlyHintInlining);
+ else
+ Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
+ }
}
+ Opts.ExperimentalNewPassManager = Args.hasFlag(
+ OPT_fexperimental_new_pass_manager, OPT_fno_experimental_new_pass_manager,
+ /* Default */ false);
+
if (Arg *A = Args.getLastArg(OPT_fveclib)) {
StringRef Name = A->getValue();
if (Name == "Accelerate")
Opts.setVecLib(CodeGenOptions::Accelerate);
+ else if (Name == "SVML")
+ Opts.setVecLib(CodeGenOptions::SVML);
else if (Name == "none")
Opts.setVecLib(CodeGenOptions::NoLibrary);
else
@@ -495,6 +509,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.WholeProgramVTables = Args.hasArg(OPT_fwhole_program_vtables);
Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
+ Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);
Opts.DebugExplicitImport = Triple.isPS4CPU();
@@ -505,7 +520,6 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.getLastArg(OPT_emit_llvm_uselists, OPT_no_emit_llvm_uselists))
Opts.EmitLLVMUseLists = A->getOption().getID() == OPT_emit_llvm_uselists;
- Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
Opts.DisableLLVMPasses = Args.hasArg(OPT_disable_llvm_passes);
Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
@@ -543,6 +557,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasFlag(OPT_fcoverage_mapping, OPT_fno_coverage_mapping, false);
Opts.DumpCoverageMapping = Args.hasArg(OPT_dump_coverage_mapping);
Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
+ Opts.PreserveAsmComments = !Args.hasArg(OPT_fno_preserve_as_comments);
Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit);
@@ -566,7 +581,11 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_cl_fast_relaxed_math));
Opts.NoSignedZeros = (Args.hasArg(OPT_fno_signed_zeros) ||
Args.hasArg(OPT_cl_no_signed_zeros));
+ Opts.FlushDenorm = Args.hasArg(OPT_cl_denorms_are_zero);
+ Opts.CorrectlyRoundedDivSqrt =
+ Args.hasArg(OPT_cl_fp32_correctly_rounded_divide_sqrt);
Opts.ReciprocalMath = Args.hasArg(OPT_freciprocal_math);
+ Opts.NoTrappingMath = Args.hasArg(OPT_fno_trapping_math);
Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option);
Opts.NumRegisterParameters = getLastArgIntValue(Args, OPT_mregparm, 0, Diags);
@@ -576,6 +595,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
Opts.IncrementalLinkerCompatible =
Args.hasArg(OPT_mincremental_linker_compatible);
+ Opts.PIECopyRelocations =
+ Args.hasArg(OPT_mpie_copy_relocations);
Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
Opts.SaveTempLabels = Args.hasArg(OPT_msave_temp_labels);
Opts.NoDwarfDirectoryAsm = Args.hasArg(OPT_fno_dwarf_directory_asm);
@@ -629,7 +650,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
- Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
+ Opts.CoverageDataFile = Args.getLastArgValue(OPT_coverage_data_file);
+ Opts.CoverageNotesFile = Args.getLastArgValue(OPT_coverage_notes_file);
Opts.CoverageExtraChecksum = Args.hasArg(OPT_coverage_cfg_checksum);
Opts.CoverageNoFunctionNamesInData =
Args.hasArg(OPT_coverage_no_function_names_in_data);
@@ -708,17 +730,24 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Args.hasArg(OPT_fsanitize_coverage_indirect_calls);
Opts.SanitizeCoverageTraceBB = Args.hasArg(OPT_fsanitize_coverage_trace_bb);
Opts.SanitizeCoverageTraceCmp = Args.hasArg(OPT_fsanitize_coverage_trace_cmp);
+ Opts.SanitizeCoverageTraceDiv = Args.hasArg(OPT_fsanitize_coverage_trace_div);
+ Opts.SanitizeCoverageTraceGep = Args.hasArg(OPT_fsanitize_coverage_trace_gep);
Opts.SanitizeCoverage8bitCounters =
Args.hasArg(OPT_fsanitize_coverage_8bit_counters);
Opts.SanitizeCoverageTracePC = Args.hasArg(OPT_fsanitize_coverage_trace_pc);
+ Opts.SanitizeCoverageTracePCGuard =
+ Args.hasArg(OPT_fsanitize_coverage_trace_pc_guard);
Opts.SanitizeMemoryTrackOrigins =
getLastArgIntValue(Args, OPT_fsanitize_memory_track_origins_EQ, 0, Diags);
Opts.SanitizeMemoryUseAfterDtor =
Args.hasArg(OPT_fsanitize_memory_use_after_dtor);
Opts.SanitizeCfiCrossDso = Args.hasArg(OPT_fsanitize_cfi_cross_dso);
Opts.SanitizeStats = Args.hasArg(OPT_fsanitize_stats);
- Opts.SanitizeAddressUseAfterScope =
- Args.hasArg(OPT_fsanitize_address_use_after_scope);
+ if (Arg *A = Args.getLastArg(OPT_fsanitize_address_use_after_scope,
+ OPT_fno_sanitize_address_use_after_scope)) {
+ Opts.SanitizeAddressUseAfterScope =
+ A->getOption().getID() == OPT_fsanitize_address_use_after_scope;
+ }
Opts.SSPBufferSize =
getLastArgIntValue(Args, OPT_stack_protector_buffer_size, 8, Diags);
Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
@@ -783,6 +812,18 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
+ if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
+ StringRef Val = A->getValue();
+ if (Val == "ieee")
+ Opts.FPDenormalMode = "ieee";
+ else if (Val == "preserve-sign")
+ Opts.FPDenormalMode = "preserve-sign";
+ else if (Val == "positive-zero")
+ Opts.FPDenormalMode = "positive-zero";
+ else
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
+ }
+
if (Arg *A = Args.getLastArg(OPT_fpcc_struct_return, OPT_freg_struct_return)) {
if (A->getOption().matches(OPT_fpcc_struct_return)) {
Opts.setStructReturnConvention(CodeGenOptions::SRCK_OnStack);
@@ -796,6 +837,10 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.LinkerOptions = Args.getAllArgValues(OPT_linker_option);
bool NeedLocTracking = false;
+ Opts.OptRecordFile = Args.getLastArgValue(OPT_opt_record_file);
+ if (!Opts.OptRecordFile.empty())
+ NeedLocTracking = true;
+
if (Arg *A = Args.getLastArg(OPT_Rpass_EQ)) {
Opts.OptimizationRemarkPattern =
GenerateOptimizationRemarkRegex(Diags, Args, A);
@@ -814,6 +859,12 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
NeedLocTracking = true;
}
+ Opts.DiagnosticsWithHotness =
+ Args.hasArg(options::OPT_fdiagnostics_show_hotness);
+ if (Opts.DiagnosticsWithHotness &&
+ Opts.getProfileUse() == CodeGenOptions::ProfileNone)
+ Diags.Report(diag::warn_drv_fdiagnostics_show_hotness_requires_pgo);
+
// If the user requested to use a sample profile for PGO, then the
// backend will need to track source location information so the profile
// can be incorporated into the IR.
@@ -885,21 +936,13 @@ static bool parseShowColorsArgs(const ArgList &Args, bool DefaultColor) {
} ShowColors = DefaultColor ? Colors_Auto : Colors_Off;
for (Arg *A : Args) {
const Option &O = A->getOption();
- if (!O.matches(options::OPT_fcolor_diagnostics) &&
- !O.matches(options::OPT_fdiagnostics_color) &&
- !O.matches(options::OPT_fno_color_diagnostics) &&
- !O.matches(options::OPT_fno_diagnostics_color) &&
- !O.matches(options::OPT_fdiagnostics_color_EQ))
- continue;
-
if (O.matches(options::OPT_fcolor_diagnostics) ||
O.matches(options::OPT_fdiagnostics_color)) {
ShowColors = Colors_On;
} else if (O.matches(options::OPT_fno_color_diagnostics) ||
O.matches(options::OPT_fno_diagnostics_color)) {
ShowColors = Colors_Off;
- } else {
- assert(O.matches(options::OPT_fdiagnostics_color_EQ));
+ } else if (O.matches(options::OPT_fdiagnostics_color_EQ)) {
StringRef Value(A->getValue());
if (Value == "always")
ShowColors = Colors_On;
@@ -909,15 +952,14 @@ static bool parseShowColorsArgs(const ArgList &Args, bool DefaultColor) {
ShowColors = Colors_Auto;
}
}
- if (ShowColors == Colors_On ||
- (ShowColors == Colors_Auto && llvm::sys::Process::StandardErrHasColors()))
- return true;
- return false;
+ return ShowColors == Colors_On ||
+ (ShowColors == Colors_Auto &&
+ llvm::sys::Process::StandardErrHasColors());
}
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
DiagnosticsEngine *Diags,
- bool DefaultDiagColor) {
+ bool DefaultDiagColor, bool DefaultShowOpt) {
using namespace options;
bool Success = true;
@@ -936,7 +978,10 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
/*Default=*/true);
Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
- Opts.ShowOptionNames = Args.hasArg(OPT_fdiagnostics_show_option);
+ Opts.AbsolutePath = Args.hasArg(OPT_fdiagnostics_absolute_paths);
+ Opts.ShowOptionNames =
+ Args.hasFlag(OPT_fdiagnostics_show_option,
+ OPT_fno_diagnostics_show_option, DefaultShowOpt);
llvm::sys::Process::UseANSIEscapeCodes(Args.hasArg(OPT_fansi_escape_codes));
@@ -1066,7 +1111,8 @@ static bool parseTestModuleFileExtensionArg(StringRef Arg,
}
static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
- DiagnosticsEngine &Diags) {
+ DiagnosticsEngine &Diags,
+ bool &IsHeaderFile) {
using namespace options;
Opts.ProgramAction = frontend::ParseSyntaxOnly;
if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
@@ -1107,6 +1153,8 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.ProgramAction = frontend::FixIt; break;
case OPT_emit_module:
Opts.ProgramAction = frontend::GenerateModule; break;
+ case OPT_emit_module_interface:
+ Opts.ProgramAction = frontend::GenerateModuleInterface; break;
case OPT_emit_pch:
Opts.ProgramAction = frontend::GeneratePCH; break;
case OPT_emit_pth:
@@ -1216,6 +1264,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.AuxTriple =
llvm::Triple::normalize(Args.getLastArgValue(OPT_aux_triple));
Opts.FindPchSource = Args.getLastArgValue(OPT_find_pch_source_EQ);
+ Opts.StatsFile = Args.getLastArgValue(OPT_stats_file);
if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
OPT_arcmt_modify,
@@ -1286,11 +1335,13 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Case("cl", IK_OpenCL)
.Case("cuda", IK_CUDA)
.Case("c++", IK_CXX)
+ .Case("c++-module", IK_CXX)
.Case("objective-c", IK_ObjC)
.Case("objective-c++", IK_ObjCXX)
.Case("cpp-output", IK_PreprocessedC)
.Case("assembler-with-cpp", IK_Asm)
.Case("c++-cpp-output", IK_PreprocessedCXX)
+ .Case("c++-module-cpp-output", IK_PreprocessedCXX)
.Case("cuda-cpp-output", IK_PreprocessedCuda)
.Case("objective-c-cpp-output", IK_PreprocessedObjC)
.Case("objc-cpp-output", IK_PreprocessedObjC)
@@ -1308,6 +1359,13 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (DashX == IK_None)
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
+ IsHeaderFile = llvm::StringSwitch<bool>(A->getValue())
+ .Case("c-header", true)
+ .Case("cl-header", true)
+ .Case("objective-c-header", true)
+ .Case("c++-header", true)
+ .Case("objective-c++-header", true)
+ .Default(false);
}
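
IsHeaderFile is computed with llvm::StringSwitch, which chains .Case matches over a StringRef and falls back to .Default. A minimal standalone sketch:

    #include "llvm/ADT/StringSwitch.h"

    bool isHeaderLanguage(llvm::StringRef X) {
      return llvm::StringSwitch<bool>(X)
          .Case("c-header", true)
          .Case("c++-header", true)
          .Default(false);
    }
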
// '-' is the default input if none is given.
@@ -1360,7 +1418,11 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
Opts.ModuleCachePath = Args.getLastArgValue(OPT_fmodules_cache_path);
Opts.ModuleUserBuildPath = Args.getLastArgValue(OPT_fmodules_user_build_path);
+ for (const Arg *A : Args.filtered(OPT_fprebuilt_module_path))
+ Opts.AddPrebuiltModulePath(A->getValue());
Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
+ Opts.ModulesValidateDiagnosticOptions =
+ !Args.hasArg(OPT_fmodules_disable_diagnostic_validation);
Opts.ImplicitModuleMaps = Args.hasArg(OPT_fimplicit_module_maps);
Opts.ModuleMapFileHomeIsCwd = Args.hasArg(OPT_fmodule_map_file_home_is_cwd);
Opts.ModuleCachePruneInterval =
@@ -1378,7 +1440,8 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
for (const Arg *A : Args.filtered(OPT_fmodules_ignore_macro)) {
StringRef MacroDef = A->getValue();
- Opts.ModulesIgnoreMacros.insert(MacroDef.split('=').first);
+ Opts.ModulesIgnoreMacros.insert(
+ llvm::CachedHashString(MacroDef.split('=').first));
}
// Add -I..., -F..., and -index-header-map options in order.
@@ -1405,7 +1468,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Path = Buffer.str();
}
- Opts.AddPath(Path.c_str(), Group, IsFramework,
+ Opts.AddPath(Path, Group, IsFramework,
/*IgnoreSysroot*/ true);
IsIndexHeaderMap = false;
}
@@ -1461,7 +1524,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Opts.AddVFSOverlayFile(A->getValue());
}
-bool isOpenCL(LangStandard::Kind LangStd) {
+static bool isOpenCL(LangStandard::Kind LangStd) {
return LangStd == LangStandard::lang_opencl ||
LangStd == LangStandard::lang_opencl11 ||
LangStd == LangStandard::lang_opencl12 ||
@@ -1501,14 +1564,16 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
case IK_Asm:
case IK_C:
case IK_PreprocessedC:
- case IK_ObjC:
- case IK_PreprocessedObjC:
// The PS4 uses C99 as the default C standard.
if (T.isPS4())
LangStd = LangStandard::lang_gnu99;
else
LangStd = LangStandard::lang_gnu11;
break;
+ case IK_ObjC:
+ case IK_PreprocessedObjC:
+ LangStd = LangStandard::lang_gnu11;
+ break;
case IK_CXX:
case IK_PreprocessedCXX:
case IK_ObjCXX:
@@ -1582,6 +1647,8 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.GNUKeywords = Opts.GNUMode;
Opts.CXXOperatorNames = Opts.CPlusPlus;
+ Opts.AlignedAllocation = Opts.CPlusPlus1z;
+
Opts.DollarIdents = !Opts.AsmPreprocessor;
}
@@ -1871,13 +1938,14 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion >= 200);
Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
- Opts.Coroutines = Args.hasArg(OPT_fcoroutines);
- Opts.Modules = Args.hasArg(OPT_fmodules);
+ Opts.CoroutinesTS = Args.hasArg(OPT_fcoroutines_ts);
+ Opts.ModulesTS = Args.hasArg(OPT_fmodules_ts);
+ Opts.Modules = Args.hasArg(OPT_fmodules) || Opts.ModulesTS;
Opts.ModulesStrictDeclUse = Args.hasArg(OPT_fmodules_strict_decluse);
Opts.ModulesDeclUse =
Args.hasArg(OPT_fmodules_decluse) || Opts.ModulesStrictDeclUse;
Opts.ModulesLocalVisibility =
- Args.hasArg(OPT_fmodules_local_submodule_visibility);
+ Args.hasArg(OPT_fmodules_local_submodule_visibility) || Opts.ModulesTS;
Opts.ModulesSearchAll = Opts.Modules &&
!Args.hasArg(OPT_fno_modules_search_all) &&
Args.hasArg(OPT_fmodules_search_all);
@@ -1892,14 +1960,27 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
if (!Opts.NoBuiltin)
getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
Opts.NoMathBuiltin = Args.hasArg(OPT_fno_math_builtin);
+ Opts.RelaxedTemplateTemplateArgs =
+ Args.hasArg(OPT_frelaxed_template_template_args);
Opts.SizedDeallocation = Args.hasArg(OPT_fsized_deallocation);
+ Opts.AlignedAllocation =
+ Args.hasFlag(OPT_faligned_allocation, OPT_fno_aligned_allocation,
+ Opts.AlignedAllocation);
+ Opts.NewAlignOverride =
+ getLastArgIntValue(Args, OPT_fnew_alignment_EQ, 0, Diags);
+ if (Opts.NewAlignOverride && !llvm::isPowerOf2_32(Opts.NewAlignOverride)) {
+ Arg *A = Args.getLastArg(OPT_fnew_alignment_EQ);
+ Diags.Report(diag::err_fe_invalid_alignment) << A->getAsString(Args)
+ << A->getValue();
+ Opts.NewAlignOverride = 0;
+ }
Opts.ConceptsTS = Args.hasArg(OPT_fconcepts_ts);
Opts.HeinousExtensions = Args.hasArg(OPT_fheinous_gnu_extensions);
Opts.AccessControl = !Args.hasArg(OPT_fno_access_control);
Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
Opts.MathErrno = !Opts.OpenCL && Args.hasArg(OPT_fmath_errno);
Opts.InstantiationDepth =
- getLastArgIntValue(Args, OPT_ftemplate_depth, 256, Diags);
+ getLastArgIntValue(Args, OPT_ftemplate_depth, 1024, Diags);
Opts.ArrowDepth =
getLastArgIntValue(Args, OPT_foperator_arrow_depth, 256, Diags);
Opts.ConstexprCallDepth =
@@ -1955,9 +2036,10 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// enabled for Microsoft Extensions or Borland Extensions, here.
//
// FIXME: __declspec is also currently enabled for CUDA, but isn't really a
- // CUDA extension, however it is required for supporting cuda_builtin_vars.h,
- // which uses __declspec(property). Once that has been rewritten in terms of
- // something more generic, remove the Opts.CUDA term here.
+ // CUDA extension. However, it is required for supporting
+ // __clang_cuda_builtin_vars.h, which uses __declspec(property). Once that has
+ // been rewritten in terms of something more generic, remove the Opts.CUDA
+ // term here.
Opts.DeclSpecKeyword =
Args.hasFlag(OPT_fdeclspec, OPT_fno_declspec,
(Opts.MicrosoftExt || Opts.Borland || Opts.CUDA));
@@ -2115,7 +2197,12 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
// This is the __NO_INLINE__ define, which just depends on things like the
// optimization level and -fno-inline, not actually whether the backend has
// inlining enabled.
- Opts.NoInlineDefine = !Opt || Args.hasArg(OPT_fno_inline);
+ Opts.NoInlineDefine = !Opts.Optimize;
+ if (Arg *InlineArg = Args.getLastArg(
+ options::OPT_finline_functions, options::OPT_finline_hint_functions,
+ options::OPT_fno_inline_functions, options::OPT_fno_inline))
+ if (InlineArg->getOption().matches(options::OPT_fno_inline))
+ Opts.NoInlineDefine = true;
Opts.FastMath = Args.hasArg(OPT_ffast_math) ||
Args.hasArg(OPT_cl_fast_relaxed_math);
@@ -2245,6 +2332,7 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
case frontend::EmitObj:
case frontend::FixIt:
case frontend::GenerateModule:
+ case frontend::GenerateModuleInterface:
case frontend::GeneratePCH:
case frontend::GeneratePTH:
case frontend::ParseSyntaxOnly:
@@ -2274,6 +2362,7 @@ static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
Opts.ShowLineMarkers = !Args.hasArg(OPT_P);
Opts.ShowMacroComments = Args.hasArg(OPT_CC);
Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
+ Opts.ShowIncludeDirectives = Args.hasArg(OPT_dI);
Opts.RewriteIncludes = Args.hasArg(OPT_frewrite_includes);
Opts.UseLineDirectives = Args.hasArg(OPT_fuse_line_directives);
}
@@ -2305,6 +2394,7 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
// Use the default target triple if unspecified.
if (Opts.Triple.empty())
Opts.Triple = llvm::sys::getDefaultTargetTriple();
+ Opts.OpenCLExtensionsAsWritten = Args.getAllArgValues(OPT_cl_ext_EQ);
}
bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
@@ -2338,12 +2428,14 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
Success &= ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
Success &= ParseMigratorArgs(Res.getMigratorOpts(), Args);
ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), Args);
- Success &= ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
- false /*DefaultDiagColor*/);
+ Success &=
+ ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
+ false /*DefaultDiagColor*/, false /*DefaultShowOpt*/);
ParseCommentArgs(LangOpts.CommentOpts, Args);
ParseFileSystemArgs(Res.getFileSystemOpts(), Args);
// FIXME: We shouldn't have to pass the DashX option around here
- InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags);
+ InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags,
+ LangOpts.IsHeaderFile);
ParseTargetArgs(Res.getTargetOpts(), Args, Diags);
Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags,
Res.getTargetOpts());
@@ -2394,6 +2486,13 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
ParsePreprocessorArgs(Res.getPreprocessorOpts(), Args, FileMgr, Diags);
ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), Args,
Res.getFrontendOpts().ProgramAction);
+
+ // Turn on -Wspir-compat for the SPIR target.
+ llvm::Triple T(Res.getTargetOpts().Triple);
+ auto Arch = T.getArch();
+ if (Arch == llvm::Triple::spir || Arch == llvm::Triple::spir64) {
+ Res.getDiagnosticOpts().Warnings.push_back("spir-compat");
+ }
return Success;
}
@@ -2441,7 +2540,8 @@ std::string CompilerInvocation::getModuleHash() const {
if (!hsOpts.ModulesIgnoreMacros.empty()) {
// Check whether we're ignoring this macro.
StringRef MacroDef = I->first;
- if (hsOpts.ModulesIgnoreMacros.count(MacroDef.split('=').first))
+ if (hsOpts.ModulesIgnoreMacros.count(
+ llvm::CachedHashString(MacroDef.split('=').first)))
continue;
}
@@ -2455,7 +2555,8 @@ std::string CompilerInvocation::getModuleHash() const {
hsOpts.UseBuiltinIncludes,
hsOpts.UseStandardSystemIncludes,
hsOpts.UseStandardCXXIncludes,
- hsOpts.UseLibcxx);
+ hsOpts.UseLibcxx,
+ hsOpts.ModulesValidateDiagnosticOptions);
code = hash_combine(code, hsOpts.ResourceDir);
// Extend the signature with the user build path.
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index a9b61282378d..059f116a3c31 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -409,9 +409,8 @@ void DFGImpl::OutputDependencyFile() {
const unsigned MaxColumns = 75;
unsigned Columns = 0;
- for (std::vector<std::string>::iterator
- I = Targets.begin(), E = Targets.end(); I != E; ++I) {
- unsigned N = I->length();
+ for (StringRef Target : Targets) {
+ unsigned N = Target.size();
if (Columns == 0) {
Columns += N;
} else if (Columns + N + 2 > MaxColumns) {
@@ -422,7 +421,7 @@ void DFGImpl::OutputDependencyFile() {
OS << ' ';
}
// Targets already quoted as needed.
- OS << *I;
+ OS << Target;
}
OS << ':';
@@ -430,18 +429,17 @@ void DFGImpl::OutputDependencyFile() {
// Now add each dependency in the order it was seen, but avoiding
// duplicates.
- for (std::vector<std::string>::iterator I = Files.begin(),
- E = Files.end(); I != E; ++I) {
+ for (StringRef File : Files) {
// Start a new line if this would exceed the column limit. Make
// sure to leave space for a trailing " \" in case we need to
// break the line on the next iteration.
- unsigned N = I->length();
+ unsigned N = File.size();
if (Columns + (N + 1) + 2 > MaxColumns) {
OS << " \\\n ";
Columns = 2;
}
OS << ' ';
- PrintFilename(OS, *I, OutputFormat);
+ PrintFilename(OS, File, OutputFormat);
Columns += N + 1;
}
OS << '\n';
@@ -449,10 +447,9 @@ void DFGImpl::OutputDependencyFile() {
// Create phony targets if requested.
if (PhonyTarget && !Files.empty()) {
// Skip the first entry; this is always the input file itself.
- for (std::vector<std::string>::iterator I = Files.begin() + 1,
- E = Files.end(); I != E; ++I) {
+ for (StringRef File : llvm::makeArrayRef(Files).drop_front()) {
OS << '\n';
- PrintFilename(OS, *I, OutputFormat);
+ PrintFilename(OS, File, OutputFormat);
OS << ":\n";
}
}
diff --git a/lib/Frontend/DiagnosticRenderer.cpp b/lib/Frontend/DiagnosticRenderer.cpp
index 586d2e6167b3..177feac97441 100644
--- a/lib/Frontend/DiagnosticRenderer.cpp
+++ b/lib/Frontend/DiagnosticRenderer.cpp
@@ -9,7 +9,6 @@
#include "clang/Frontend/DiagnosticRenderer.h"
#include "clang/Basic/DiagnosticOptions.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Edit/Commit.h"
#include "clang/Edit/EditedSource.h"
@@ -18,7 +17,6 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index d514d406d8b6..e871b310302d 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -20,19 +20,20 @@
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Parse/ParseAST.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <system_error>
using namespace clang;
-template class llvm::Registry<clang::PluginASTAction>;
+LLVM_INSTANTIATE_REGISTRY(FrontendPluginRegistry)
namespace {
@@ -287,14 +288,15 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
SmallString<128> DirNative;
llvm::sys::path::native(PCHDir->getName(), DirNative);
bool Found = false;
- for (llvm::sys::fs::directory_iterator Dir(DirNative, EC), DirEnd;
+ vfs::FileSystem &FS = *FileMgr.getVirtualFileSystem();
+ for (vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Check whether this is an acceptable AST file.
if (ASTReader::isAcceptableASTFile(
- Dir->path(), FileMgr, CI.getPCHContainerReader(),
+ Dir->getName(), FileMgr, CI.getPCHContainerReader(),
CI.getLangOpts(), CI.getTargetOpts(), CI.getPreprocessorOpts(),
SpecificModuleCachePath)) {
- PPOpts.ImplicitPCHInclude = Dir->path();
+ PPOpts.ImplicitPCHInclude = Dir->getName();
Found = true;
break;
}
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index b1e806add8cc..eb91940cbbfc 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -11,19 +11,18 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/ASTConsumers.h"
-#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
-#include "clang/Lex/Pragma.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Parse/Parser.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <system_error>
@@ -92,7 +91,7 @@ GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
auto Buffer = std::make_shared<PCHBuffer>();
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
Consumers.push_back(llvm::make_unique<PCHGenerator>(
- CI.getPreprocessor(), OutputFile, nullptr, Sysroot,
+ CI.getPreprocessor(), OutputFile, Sysroot,
Buffer, CI.getFrontendOpts().ModuleFileExtensions,
/*AllowASTWithErrors*/false,
/*IncludeTimestamps*/
@@ -131,18 +130,18 @@ GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
std::unique_ptr<ASTConsumer>
GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
- std::string Sysroot;
- std::string OutputFile;
- std::unique_ptr<raw_pwrite_stream> OS =
- ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile);
+ std::unique_ptr<raw_pwrite_stream> OS = CreateOutputFile(CI, InFile);
if (!OS)
return nullptr;
+ std::string OutputFile = CI.getFrontendOpts().OutputFile;
+ std::string Sysroot;
+
auto Buffer = std::make_shared<PCHBuffer>();
std::vector<std::unique_ptr<ASTConsumer>> Consumers;
Consumers.push_back(llvm::make_unique<PCHGenerator>(
- CI.getPreprocessor(), OutputFile, Module, Sysroot,
+ CI.getPreprocessor(), OutputFile, Sysroot,
Buffer, CI.getFrontendOpts().ModuleFileExtensions,
/*AllowASTWithErrors=*/false,
/*IncludeTimestamps=*/
@@ -152,6 +151,23 @@ GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
return llvm::make_unique<MultiplexConsumer>(std::move(Consumers));
}
+bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ // Set up embedding for any specified files. Do this before we load any
+ // source files, including the primary module map for the compilation.
+ for (const auto &F : CI.getFrontendOpts().ModulesEmbedFiles) {
+ if (const auto *FE = CI.getFileManager().getFile(F, /*openFile*/true))
+ CI.getSourceManager().setFileIsTransient(FE);
+ else
+ CI.getDiagnostics().Report(diag::err_modules_embed_file_not_found) << F;
+ }
+ if (CI.getFrontendOpts().ModulesEmbedAllFiles)
+ CI.getSourceManager().setAllFilesAreTransient(true);
+
+ return true;
+}
+
+
static SmallVectorImpl<char> &
operator+=(SmallVectorImpl<char> &Includes, StringRef RHS) {
Includes.append(RHS.begin(), RHS.end());
@@ -267,9 +283,12 @@ collectModuleHeaderIncludes(const LangOptions &LangOpts, FileManager &FileMgr,
return std::error_code();
}
-bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
- StringRef Filename) {
- CI.getLangOpts().CompilingModule = true;
+bool GenerateModuleFromModuleMapAction::BeginSourceFileAction(
+ CompilerInstance &CI, StringRef Filename) {
+ CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleMap);
+
+ if (!GenerateModuleAction::BeginSourceFileAction(CI, Filename))
+ return false;
// Find the module map file.
const FileEntry *ModuleMap =
@@ -280,17 +299,6 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
return false;
}
- // Set up embedding for any specified files. Do this before we load any
- // source files, including the primary module map for the compilation.
- for (const auto &F : CI.getFrontendOpts().ModulesEmbedFiles) {
- if (const auto *FE = CI.getFileManager().getFile(F, /*openFile*/true))
- CI.getSourceManager().setFileIsTransient(FE);
- else
- CI.getDiagnostics().Report(diag::err_modules_embed_file_not_found) << F;
- }
- if (CI.getFrontendOpts().ModulesEmbedAllFiles)
- CI.getSourceManager().setAllFilesAreTransient(true);
-
// Parse the module map file.
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
if (HS.loadModuleMapFile(ModuleMap, IsSystem))
@@ -382,32 +390,43 @@ bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
}
std::unique_ptr<raw_pwrite_stream>
-GenerateModuleAction::ComputeASTConsumerArguments(CompilerInstance &CI,
- StringRef InFile,
- std::string &Sysroot,
- std::string &OutputFile) {
+GenerateModuleFromModuleMapAction::CreateOutputFile(CompilerInstance &CI,
+ StringRef InFile) {
// If no output file was provided, figure out where this module would go
// in the module cache.
if (CI.getFrontendOpts().OutputFile.empty()) {
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
CI.getFrontendOpts().OutputFile =
HS.getModuleFileName(CI.getLangOpts().CurrentModule,
- ModuleMapForUniquing->getName());
+ ModuleMapForUniquing->getName(),
+ /*UsePrebuiltPath=*/false);
}
// We use createOutputFile here because this is exposed via libclang, and we
// must disable the RemoveFileOnSignal behavior.
// We use a temporary to avoid race conditions.
- std::unique_ptr<raw_pwrite_stream> OS =
- CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
- /*RemoveFileOnSignal=*/false, InFile,
- /*Extension=*/"", /*useTemporary=*/true,
- /*CreateMissingDirectories=*/true);
- if (!OS)
- return nullptr;
+ return CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
+ /*RemoveFileOnSignal=*/false, InFile,
+ /*Extension=*/"", /*useTemporary=*/true,
+ /*CreateMissingDirectories=*/true);
+}
- OutputFile = CI.getFrontendOpts().OutputFile;
- return OS;
+bool GenerateModuleInterfaceAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ if (!CI.getLangOpts().ModulesTS) {
+ CI.getDiagnostics().Report(diag::err_module_interface_requires_modules_ts);
+ return false;
+ }
+
+ CI.getLangOpts().setCompilingModule(LangOptions::CMK_ModuleInterface);
+
+ return GenerateModuleAction::BeginSourceFileAction(CI, Filename);
+}
+
+std::unique_ptr<raw_pwrite_stream>
+GenerateModuleInterfaceAction::CreateOutputFile(CompilerInstance &CI,
+ StringRef InFile) {
+ return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
}
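Taken together with the dispatch change in ExecuteCompilerInvocation further down, the new action enables an invocation along these lines (a hedged sketch; file and module names hypothetical):

  clang -cc1 -std=c++1z -fmodules-ts -emit-module-interface foo.cppm -o foo.pcm

The BeginSourceFileAction override above enforces the -fmodules-ts guard: without it, cc1 reports err_module_interface_requires_modules_ts instead of silently producing a .pcm.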
SyntaxOnlyAction::~SyntaxOnlyAction() {
@@ -597,6 +616,13 @@ namespace {
};
}
+bool DumpModuleInfoAction::BeginInvocation(CompilerInstance &CI) {
+  // The object file reader also supports raw AST files, and there is no point
+  // in being strict about the module file format in -module-file-info mode.
+ CI.getHeaderSearchOpts().ModuleFormat = "obj";
+ return true;
+}
+
void DumpModuleInfoAction::ExecuteAction() {
// Set up the output file.
std::unique_ptr<llvm::raw_fd_ostream> OutFile;
@@ -609,11 +635,21 @@ void DumpModuleInfoAction::ExecuteAction() {
llvm::raw_ostream &Out = OutFile.get()? *OutFile.get() : llvm::outs();
Out << "Information for module file '" << getCurrentFile() << "':\n";
+ auto &FileMgr = getCompilerInstance().getFileManager();
+ auto Buffer = FileMgr.getBufferForFile(getCurrentFile());
+ StringRef Magic = (*Buffer)->getMemBufferRef().getBuffer();
+ bool IsRaw = (Magic.size() >= 4 && Magic[0] == 'C' && Magic[1] == 'P' &&
+ Magic[2] == 'C' && Magic[3] == 'H');
+ Out << " Module format: " << (IsRaw ? "raw" : "obj") << "\n";
+
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
DumpModuleInfoListener Listener(Out);
+ HeaderSearchOptions &HSOpts =
+ PP.getHeaderSearchInfo().getHeaderSearchOpts();
ASTReader::readASTFileControlBlock(
- getCurrentFile(), getCompilerInstance().getFileManager(),
- getCompilerInstance().getPCHContainerReader(),
- /*FindModuleFileExtensions=*/true, Listener);
+ getCurrentFile(), FileMgr, getCompilerInstance().getPCHContainerReader(),
+ /*FindModuleFileExtensions=*/true, Listener,
+ HSOpts.ModulesValidateDiagnosticOptions);
}
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/FrontendOptions.cpp b/lib/Frontend/FrontendOptions.cpp
index 9ede674e47ea..6a82084aff1b 100644
--- a/lib/Frontend/FrontendOptions.cpp
+++ b/lib/Frontend/FrontendOptions.cpp
@@ -25,6 +25,8 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Case("mii", IK_PreprocessedObjCXX)
.Cases("C", "cc", "cp", IK_CXX)
.Cases("cpp", "CPP", "c++", "cxx", "hpp", IK_CXX)
+ .Case("cppm", IK_CXX)
+ .Case("iim", IK_PreprocessedCXX)
.Case("cl", IK_OpenCL)
.Case("cu", IK_CUDA)
.Cases("ll", "bc", IK_LLVM_IR)
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index 1b5c760f01b5..d50fb6d788a4 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/Utils.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Config/config.h" // C_INCLUDE_DIRS
+#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderMap.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
@@ -25,7 +25,6 @@
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -527,7 +526,7 @@ static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
if (CurEntry.getDirCharacteristic() != SrcMgr::C_User) {
// Find the dir that this is a duplicate of.
unsigned FirstDir;
- for (FirstDir = 0; ; ++FirstDir) {
+ for (FirstDir = First;; ++FirstDir) {
assert(FirstDir != i && "Didn't find dupe?");
const DirectoryLookup &SearchEntry = SearchList[FirstDir];
@@ -626,7 +625,7 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
for (unsigned i = 0, e = SearchList.size(); i != e; ++i) {
if (i == NumQuoted)
llvm::errs() << "#include <...> search starts here:\n";
- const char *Name = SearchList[i].getName();
+ StringRef Name = SearchList[i].getName();
const char *Suffix;
if (SearchList[i].isNormalDir())
Suffix = "";
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index 6b93c697d9b1..17603ada11d1 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/Utils.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/MacroBuilder.h"
#include "clang/Basic/SourceManager.h"
@@ -19,15 +18,13 @@
#include "clang/Basic/Version.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/PTHManager.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/APFloat.h"
-#include "llvm/Support/FileSystem.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
using namespace clang;
static bool MacroBodyEndsInBackslash(StringRef MacroBody) {
@@ -115,15 +112,15 @@ template <typename T>
static T PickFP(const llvm::fltSemantics *Sem, T IEEESingleVal,
T IEEEDoubleVal, T X87DoubleExtendedVal, T PPCDoubleDoubleVal,
T IEEEQuadVal) {
- if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEsingle)
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEsingle())
return IEEESingleVal;
- if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEdouble)
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEdouble())
return IEEEDoubleVal;
- if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::x87DoubleExtended)
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::x87DoubleExtended())
return X87DoubleExtendedVal;
- if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::PPCDoubleDouble)
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::PPCDoubleDouble())
return PPCDoubleDoubleVal;
- assert(Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEquad);
+ assert(Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEquad());
return IEEEQuadVal;
}
@@ -395,6 +392,15 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
// C++ translation unit.
else
Builder.defineMacro("__cplusplus", "199711L");
+
+ // C++1z [cpp.predefined]p1:
+ // An integer literal of type std::size_t whose value is the alignment
+ // guaranteed by a call to operator new(std::size_t)
+ //
+ // We provide this in all language modes, since it seems generally useful.
+ Builder.defineMacro("__STDCPP_DEFAULT_NEW_ALIGNMENT__",
+ Twine(TI.getNewAlign() / TI.getCharWidth()) +
+ TI.getTypeConstantSuffix(TI.getSizeType()));
}
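For a concrete value: on a typical x86-64 target, TI.getNewAlign() is 128 bits, so the macro expands to 16UL (the UL suffix coming from getTypeConstantSuffix for size_t). A minimal consumer-side sketch, relying only on the standard guarantee that operator new returns storage aligned for any type with fundamental alignment:

  #include <cstddef>
  static_assert(__STDCPP_DEFAULT_NEW_ALIGNMENT__ >= alignof(std::max_align_t),
                "operator new(size_t) storage is aligned for max_align_t");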
// In C11 these are environment macros. In C++11 they are only defined
@@ -438,6 +444,9 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("CL_VERSION_1_2", "120");
Builder.defineMacro("CL_VERSION_2_0", "200");
+ if (TI.isLittleEndian())
+ Builder.defineMacro("__ENDIAN_LITTLE__");
+
if (LangOpts.FastRelaxedMath)
Builder.defineMacro("__FAST_RELAXED_MATH__");
}
@@ -467,8 +476,10 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_lambdas", "200907");
Builder.defineMacro("__cpp_constexpr",
LangOpts.CPlusPlus14 ? "201304" : "200704");
- Builder.defineMacro("__cpp_range_based_for", "200907");
- Builder.defineMacro("__cpp_static_assert", "200410");
+ Builder.defineMacro("__cpp_range_based_for",
+ LangOpts.CPlusPlus1z ? "201603" : "200907");
+ Builder.defineMacro("__cpp_static_assert",
+ LangOpts.CPlusPlus1z ? "201411" : "200410");
Builder.defineMacro("__cpp_decltype", "200707");
Builder.defineMacro("__cpp_attributes", "200809");
Builder.defineMacro("__cpp_rvalue_references", "200610");
@@ -476,7 +487,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_initializer_lists", "200806");
Builder.defineMacro("__cpp_delegating_constructors", "200604");
Builder.defineMacro("__cpp_nsdmi", "200809");
- Builder.defineMacro("__cpp_inheriting_constructors", "200802");
+ Builder.defineMacro("__cpp_inheriting_constructors", "201511");
Builder.defineMacro("__cpp_ref_qualifiers", "200710");
Builder.defineMacro("__cpp_alias_templates", "200704");
}
@@ -494,9 +505,31 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
}
if (LangOpts.SizedDeallocation)
Builder.defineMacro("__cpp_sized_deallocation", "201309");
+
+ // C++17 features.
+ if (LangOpts.CPlusPlus1z) {
+ Builder.defineMacro("__cpp_hex_float", "201603");
+ Builder.defineMacro("__cpp_inline_variables", "201606");
+ Builder.defineMacro("__cpp_noexcept_function_type", "201510");
+ Builder.defineMacro("__cpp_capture_star_this", "201603");
+ Builder.defineMacro("__cpp_if_constexpr", "201606");
+ Builder.defineMacro("__cpp_template_auto", "201606");
+ Builder.defineMacro("__cpp_namespace_attributes", "201411");
+ Builder.defineMacro("__cpp_enumerator_attributes", "201411");
+ Builder.defineMacro("__cpp_nested_namespace_definitions", "201411");
+ Builder.defineMacro("__cpp_variadic_using", "201611");
+ Builder.defineMacro("__cpp_aggregate_bases", "201603");
+ Builder.defineMacro("__cpp_structured_bindings", "201606");
+ Builder.defineMacro("__cpp_nontype_template_args", "201411");
+ Builder.defineMacro("__cpp_fold_expressions", "201603");
+ }
+ if (LangOpts.AlignedAllocation)
+ Builder.defineMacro("__cpp_aligned_new", "201606");
+
+ // TS features.
if (LangOpts.ConceptsTS)
Builder.defineMacro("__cpp_experimental_concepts", "1");
- if (LangOpts.Coroutines)
+ if (LangOpts.CoroutinesTS)
Builder.defineMacro("__cpp_coroutines", "1");
}
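A hedged sketch of the consumer-side pattern these macros exist for (plain portable C++, nothing clang-specific):

  int probe() {
  #if defined(__cpp_if_constexpr) && __cpp_if_constexpr >= 201606
    // C++17 path, compiled only when the compiler advertises the feature.
    if constexpr (sizeof(long) == 8)
      return 8;
  #endif
    return 4;
  }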
@@ -511,16 +544,12 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
#define TOSTR(X) TOSTR2(X)
Builder.defineMacro("__clang_major__", TOSTR(CLANG_VERSION_MAJOR));
Builder.defineMacro("__clang_minor__", TOSTR(CLANG_VERSION_MINOR));
-#ifdef CLANG_VERSION_PATCHLEVEL
Builder.defineMacro("__clang_patchlevel__", TOSTR(CLANG_VERSION_PATCHLEVEL));
-#else
- Builder.defineMacro("__clang_patchlevel__", "0");
-#endif
+#undef TOSTR
+#undef TOSTR2
Builder.defineMacro("__clang_version__",
"\"" CLANG_VERSION_STRING " "
+ getClangFullRepositoryVersion() + "\"");
-#undef TOSTR
-#undef TOSTR2
if (!LangOpts.MSVCCompat) {
// Currently claim to be compatible with GCC 4.2.1-5621, but only if we're
// not compiling for MSVC compatibility
@@ -564,6 +593,9 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("OBJC_ZEROCOST_EXCEPTIONS");
}
+ Builder.defineMacro("__OBJC_BOOL_IS_BOOL",
+ Twine(TI.useSignedCharForObjCBool() ? "0" : "1"));
+
if (LangOpts.getGC() != LangOptions::NonGC)
Builder.defineMacro("__OBJC_GC__");
@@ -677,7 +709,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// Define type sizing macros based on the target properties.
assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
- Builder.defineMacro("__CHAR_BIT__", "8");
+ Builder.defineMacro("__CHAR_BIT__", Twine(TI.getCharWidth()));
DefineTypeSize("__SCHAR_MAX__", TargetInfo::SignedChar, TI, Builder);
DefineTypeSize("__SHRT_MAX__", TargetInfo::SignedShort, TI, Builder);
@@ -958,12 +990,20 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// OpenCL definitions.
if (LangOpts.OpenCL) {
#define OPENCLEXT(Ext) \
- if (TI.getSupportedOpenCLOpts().is_##Ext##_supported( \
+ if (TI.getSupportedOpenCLOpts().isSupported(#Ext, \
LangOpts.OpenCLVersion)) \
Builder.defineMacro(#Ext);
#include "clang/Basic/OpenCLExtensions.def"
}
+ if (TI.hasInt128Type() && LangOpts.CPlusPlus && LangOpts.GNUMode) {
+ // For each extended integer type, g++ defines a macro mapping the
+ // index of the type (0 in this case) in some list of extended types
+ // to the type.
+ Builder.defineMacro("__GLIBCXX_TYPE_INT_N_0", "__int128");
+ Builder.defineMacro("__GLIBCXX_BITSIZE_INT_N_0", "128");
+ }
+
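The practical effect, assuming a libstdc++ whose <limits> keys on these macros, is that __int128 gains standard trait support in GNU mode; a minimal sketch:

  // build: clang++ -std=gnu++11 -fsyntax-only probe.cpp
  #include <limits>
  static_assert(std::numeric_limits<__int128>::is_specialized,
                "__int128 is wired up via __GLIBCXX_TYPE_INT_N_0");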
// Get other target #defines.
TI.getTargetDefines(LangOpts, Builder);
}
diff --git a/lib/Frontend/ModuleDependencyCollector.cpp b/lib/Frontend/ModuleDependencyCollector.cpp
index ca11f9b863bb..9b34d4211353 100644
--- a/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/lib/Frontend/ModuleDependencyCollector.cpp
@@ -15,7 +15,6 @@
#include "clang/Frontend/Utils.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Serialization/ASTReader.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -39,6 +38,24 @@ public:
}
};
+struct ModuleDependencyPPCallbacks : public PPCallbacks {
+ ModuleDependencyCollector &Collector;
+ SourceManager &SM;
+ ModuleDependencyPPCallbacks(ModuleDependencyCollector &Collector,
+ SourceManager &SM)
+ : Collector(Collector), SM(SM) {}
+
+ void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
+ StringRef FileName, bool IsAngled,
+ CharSourceRange FilenameRange, const FileEntry *File,
+ StringRef SearchPath, StringRef RelativePath,
+ const Module *Imported) override {
+ if (!File)
+ return;
+ Collector.addFile(File->getName());
+ }
+};
+
struct ModuleDependencyMMCallbacks : public ModuleMapCallbacks {
ModuleDependencyCollector &Collector;
ModuleDependencyMMCallbacks(ModuleDependencyCollector &Collector)
@@ -103,6 +120,8 @@ void ModuleDependencyCollector::attachToASTReader(ASTReader &R) {
}
void ModuleDependencyCollector::attachToPreprocessor(Preprocessor &PP) {
+ PP.addPPCallbacks(llvm::make_unique<ModuleDependencyPPCallbacks>(
+ *this, PP.getSourceManager()));
PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
llvm::make_unique<ModuleDependencyMMCallbacks>(*this));
}
@@ -135,6 +154,10 @@ void ModuleDependencyCollector::writeFileMap() {
// allows crash reproducer scripts to work across machines.
VFSWriter.setOverlayDir(VFSDir);
+  // Do not ignore non-existent contents; otherwise we might skip something
+  // that should have been collected here.
+ VFSWriter.setIgnoreNonExistentContents(false);
+
// Explicitly set case sensitivity for the YAML writer. For that, find out
// the sensitivity at the path where the headers were all collected.
VFSWriter.setCaseSensitivity(isCaseSensitivePath(VFSDir));
@@ -178,7 +201,8 @@ bool ModuleDependencyCollector::getRealPath(StringRef SrcPath,
return true;
}
-std::error_code ModuleDependencyCollector::copyToRoot(StringRef Src) {
+std::error_code ModuleDependencyCollector::copyToRoot(StringRef Src,
+ StringRef Dst) {
using namespace llvm::sys;
// We need an absolute src path to append to the root.
@@ -190,23 +214,35 @@ std::error_code ModuleDependencyCollector::copyToRoot(StringRef Src) {
AbsoluteSrc = path::remove_leading_dotslash(AbsoluteSrc);
// Canonicalize the source path by removing "..", "." components.
- SmallString<256> CanonicalPath = AbsoluteSrc;
- path::remove_dots(CanonicalPath, /*remove_dot_dot=*/true);
+ SmallString<256> VirtualPath = AbsoluteSrc;
+ path::remove_dots(VirtualPath, /*remove_dot_dot=*/true);
// If a ".." component is present after a symlink component, remove_dots may
// lead to the wrong real destination path. Let the source be canonicalized
// like that but make sure we always use the real path for the destination.
- SmallString<256> RealPath;
- if (!getRealPath(AbsoluteSrc, RealPath))
- RealPath = CanonicalPath;
- SmallString<256> Dest = getDest();
- path::append(Dest, path::relative_path(RealPath));
+ SmallString<256> CopyFrom;
+ if (!getRealPath(AbsoluteSrc, CopyFrom))
+ CopyFrom = VirtualPath;
+ SmallString<256> CacheDst = getDest();
+
+ if (Dst.empty()) {
+ // The common case is to map the virtual path to the same path inside the
+ // cache.
+ path::append(CacheDst, path::relative_path(CopyFrom));
+ } else {
+ // When collecting entries from input vfsoverlays, copy the external
+ // contents into the cache but still map from the source.
+ if (!fs::exists(Dst))
+ return std::error_code();
+ path::append(CacheDst, Dst);
+ CopyFrom = Dst;
+ }
// Copy the file into place.
- if (std::error_code EC = fs::create_directories(path::parent_path(Dest),
- /*IgnoreExisting=*/true))
+ if (std::error_code EC = fs::create_directories(path::parent_path(CacheDst),
+ /*IgnoreExisting=*/true))
return EC;
- if (std::error_code EC = fs::copy_file(RealPath, Dest))
+ if (std::error_code EC = fs::copy_file(CopyFrom, CacheDst))
return EC;
// Always map a canonical src path to its real path into the YAML, by doing
@@ -214,12 +250,12 @@ std::error_code ModuleDependencyCollector::copyToRoot(StringRef Src) {
// overlay, which is a way to emulate a symlink inside the VFS; this is also
// needed for correctness, since not doing that can lead to module redefinition
// errors.
- addFileMapping(CanonicalPath, Dest);
+ addFileMapping(VirtualPath, CacheDst);
return std::error_code();
}
-void ModuleDependencyCollector::addFile(StringRef Filename) {
+void ModuleDependencyCollector::addFile(StringRef Filename, StringRef FileDst) {
if (insertSeen(Filename))
- if (copyToRoot(Filename))
+ if (copyToRoot(Filename, FileDst))
HasErrors = true;
}
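Each addFileMapping call above becomes one entry in the crash reproducer's VFS overlay; a hedged sketch of the emitted YAML shape (paths hypothetical, field names per the VFS overlay format):

  { 'type': 'file',
    'name': '/project/include/a.h',
    'external-contents': '/repro/root/project/include/a.h' }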
diff --git a/lib/Frontend/MultiplexConsumer.cpp b/lib/Frontend/MultiplexConsumer.cpp
index 17cdaee4be05..8ef6df5e740d 100644
--- a/lib/Frontend/MultiplexConsumer.cpp
+++ b/lib/Frontend/MultiplexConsumer.cpp
@@ -120,6 +120,7 @@ public:
void CompletedImplicitDefinition(const FunctionDecl *D) override;
void StaticDataMemberInstantiated(const VarDecl *D) override;
void DefaultArgumentInstantiated(const ParmVarDecl *D) override;
+ void DefaultMemberInitializerInstantiated(const FieldDecl *D) override;
void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD) override;
void FunctionDefinitionInstantiated(const FunctionDecl *D) override;
@@ -201,6 +202,11 @@ void MultiplexASTMutationListener::DefaultArgumentInstantiated(
for (size_t i = 0, e = Listeners.size(); i != e; ++i)
Listeners[i]->DefaultArgumentInstantiated(D);
}
+void MultiplexASTMutationListener::DefaultMemberInitializerInstantiated(
+ const FieldDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DefaultMemberInitializerInstantiated(D);
+}
void MultiplexASTMutationListener::AddedObjCCategoryToInterface(
const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD) {
diff --git a/lib/Frontend/PCHContainerOperations.cpp b/lib/Frontend/PCHContainerOperations.cpp
index 2d4edde43280..eebebf327a19 100644
--- a/lib/Frontend/PCHContainerOperations.cpp
+++ b/lib/Frontend/PCHContainerOperations.cpp
@@ -58,10 +58,9 @@ std::unique_ptr<ASTConsumer> RawPCHContainerWriter::CreatePCHContainerGenerator(
return llvm::make_unique<RawPCHContainerGenerator>(std::move(OS), Buffer);
}
-void RawPCHContainerReader::ExtractPCH(
- llvm::MemoryBufferRef Buffer, llvm::BitstreamReader &StreamFile) const {
- StreamFile.init((const unsigned char *)Buffer.getBufferStart(),
- (const unsigned char *)Buffer.getBufferEnd());
+StringRef
+RawPCHContainerReader::ExtractPCH(llvm::MemoryBufferRef Buffer) const {
+ return Buffer.getBuffer();
}
PCHContainerOperations::PCHContainerOperations() {
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 77b80e612fbf..d48b952ef203 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -93,13 +93,16 @@ private:
bool Initialized;
bool DisableLineMarkers;
bool DumpDefines;
+ bool DumpIncludeDirectives;
bool UseLineDirectives;
bool IsFirstFileEntered;
public:
PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os, bool lineMarkers,
- bool defines, bool UseLineDirectives)
+ bool defines, bool DumpIncludeDirectives,
+ bool UseLineDirectives)
: PP(pp), SM(PP.getSourceManager()), ConcatInfo(PP), OS(os),
DisableLineMarkers(lineMarkers), DumpDefines(defines),
+ DumpIncludeDirectives(DumpIncludeDirectives),
UseLineDirectives(UseLineDirectives) {
CurLine = 0;
CurFilename += "<uninit>";
@@ -320,10 +323,10 @@ void PrintPPOutputPPCallbacks::InclusionDirective(SourceLocation HashLoc,
StringRef SearchPath,
StringRef RelativePath,
const Module *Imported) {
- // When preprocessing, turn implicit imports into @imports.
- // FIXME: This is a stop-gap until a more comprehensive "preprocessing with
- // modules" solution is introduced.
if (Imported) {
+ // When preprocessing, turn implicit imports into @imports.
+ // FIXME: This is a stop-gap until a more comprehensive "preprocessing with
+ // modules" solution is introduced.
startNewLineIfNeeded();
MoveToLine(HashLoc);
if (PP.getLangOpts().ObjC2) {
@@ -331,9 +334,9 @@ void PrintPPOutputPPCallbacks::InclusionDirective(SourceLocation HashLoc,
<< " /* clang -E: implicit import for \"" << File->getName()
<< "\" */";
} else {
- // FIXME: Preseve whether this was a
- // #include/#include_next/#include_macros/#import.
- OS << "#include "
+ const std::string TokenText = PP.getSpelling(IncludeTok);
+ assert(!TokenText.empty());
+ OS << "#" << TokenText << " "
<< (IsAngled ? '<' : '"')
<< FileName
<< (IsAngled ? '>' : '"')
@@ -344,6 +347,20 @@ void PrintPPOutputPPCallbacks::InclusionDirective(SourceLocation HashLoc,
// line immediately.
EmittedTokensOnThisLine = true;
startNewLineIfNeeded();
+ } else {
+ // Not a module import; it's a more vanilla inclusion of some file using one
+ // of: #include, #import, #include_next, #include_macros.
+ if (DumpIncludeDirectives) {
+ startNewLineIfNeeded();
+ MoveToLine(HashLoc);
+ const std::string TokenText = PP.getSpelling(IncludeTok);
+ assert(!TokenText.empty());
+ OS << "#" << TokenText << " "
+ << (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
+ << " /* clang -E -dI */";
+ setEmittedDirectiveOnThisLine();
+ startNewLineIfNeeded();
+ }
}
}
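Given the format string above, the new -dI mode annotates each textual inclusion in place; a hedged sketch of one emitted line (header name hypothetical):

  #include "t.h" /* clang -E -dI */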
@@ -751,7 +768,8 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
PP.SetCommentRetentionState(Opts.ShowComments, Opts.ShowMacroComments);
PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(
- PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros, Opts.UseLineDirectives);
+ PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros,
+ Opts.ShowIncludeDirectives, Opts.UseLineDirectives);
// Expand macros in pragmas with -fms-extensions. The assumption is that
// the majority of pragmas in such a file will be Microsoft pragmas.
diff --git a/lib/Frontend/Rewrite/FrontendActions.cpp b/lib/Frontend/Rewrite/FrontendActions.cpp
index 13d410e21381..2e76e2e3151e 100644
--- a/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -9,13 +9,12 @@
#include "clang/Rewrite/Frontend/FrontendActions.h"
#include "clang/AST/ASTConsumer.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendActions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/Preprocessor.h"
-#include "clang/Parse/Parser.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "clang/Rewrite/Frontend/FixItRewriter.h"
#include "clang/Rewrite/Frontend/Rewriters.h"
diff --git a/lib/Frontend/Rewrite/HTMLPrint.cpp b/lib/Frontend/Rewrite/HTMLPrint.cpp
index f5fad346124a..11e431de0a31 100644
--- a/lib/Frontend/Rewrite/HTMLPrint.cpp
+++ b/lib/Frontend/Rewrite/HTMLPrint.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -21,7 +20,7 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/HTMLRewrite.h"
#include "clang/Rewrite/Core/Rewriter.h"
-#include "llvm/Support/MemoryBuffer.h"
+#include "clang/Rewrite/Frontend/ASTConsumers.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -65,7 +64,7 @@ void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
// Format the file.
FileID FID = R.getSourceMgr().getMainFileID();
const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID);
- const char* Name;
+ StringRef Name;
// In some cases, in particular the case where the input is from stdin,
// there is no entry. Fall back to the memory buffer for a name in those
// cases.
diff --git a/lib/Frontend/Rewrite/InclusionRewriter.cpp b/lib/Frontend/Rewrite/InclusionRewriter.cpp
index b761c34fcbde..d953da2e4fd2 100644
--- a/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -68,7 +68,7 @@ private:
CharSourceRange FilenameRange, const FileEntry *File,
StringRef SearchPath, StringRef RelativePath,
const Module *Imported) override;
- void WriteLineInfo(const char *Filename, int Line,
+ void WriteLineInfo(StringRef Filename, int Line,
SrcMgr::CharacteristicKind FileType,
StringRef Extra = StringRef());
void WriteImplicitModuleImport(const Module *Mod);
@@ -102,7 +102,7 @@ InclusionRewriter::InclusionRewriter(Preprocessor &PP, raw_ostream &OS,
/// markers depending on what mode we're in, including the \p Filename and
/// \p Line we are located at, using the specified \p EOL line separator, and
/// any \p Extra context specifiers in GNU line directives.
-void InclusionRewriter::WriteLineInfo(const char *Filename, int Line,
+void InclusionRewriter::WriteLineInfo(StringRef Filename, int Line,
SrcMgr::CharacteristicKind FileType,
StringRef Extra) {
if (!ShowLineMarkers)
@@ -406,7 +406,7 @@ bool InclusionRewriter::Process(FileID FileId,
bool Invalid;
const MemoryBuffer &FromFile = *SM.getBuffer(FileId, &Invalid);
assert(!Invalid && "Attempting to process invalid inclusion");
- const char *FileName = FromFile.getBufferIdentifier();
+ StringRef FileName = FromFile.getBufferIdentifier();
Lexer RawLex(FileId, &FromFile, PP.getSourceManager(), PP.getLangOpts());
RawLex.SetCommentRetentionState(false);
diff --git a/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index ad217517d7d7..e7bfcedd2176 100644
--- a/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -863,9 +863,9 @@ RewriteModernObjC::getIvarAccessString(ObjCIvarDecl *D) {
CDecl = CatDecl->getClassInterface();
std::string RecName = CDecl->getName();
RecName += "_IMPL";
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- &Context->Idents.get(RecName.c_str()));
+ RecordDecl *RD =
+ RecordDecl::Create(*Context, TTK_Struct, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(RecName));
QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
@@ -5301,11 +5301,9 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
// Initialize the block descriptor.
std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
- VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
- SourceLocation(), SourceLocation(),
- &Context->Idents.get(DescData.c_str()),
- Context->VoidPtrTy, nullptr,
- SC_Static);
+ VarDecl *NewVD = VarDecl::Create(
+ *Context, TUDecl, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
UnaryOperator *DescRefExpr =
new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
Context->VoidPtrTy,
@@ -6350,8 +6348,7 @@ static void Write_method_list_t_initializer(RewriteModernObjC &RewriteObj,
Result += "\t{(struct objc_selector *)\"";
Result += (MD)->getSelector().getAsString(); Result += "\"";
Result += ", ";
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl(MD, MethodTypeString);
+ std::string MethodTypeString = Context->getObjCEncodingForMethodDecl(MD);
Result += "\""; Result += MethodTypeString; Result += "\"";
Result += ", ";
if (!MethodImpl)
@@ -6390,8 +6387,9 @@ static void Write_prop_list_t_initializer(RewriteModernObjC &RewriteObj,
else
Result += "\t{\"";
Result += PropDecl->getName(); Result += "\",";
- std::string PropertyTypeString, QuotePropertyTypeString;
- Context->getObjCEncodingForPropertyDecl(PropDecl, Container, PropertyTypeString);
+ std::string PropertyTypeString =
+ Context->getObjCEncodingForPropertyDecl(PropDecl, Container);
+ std::string QuotePropertyTypeString;
RewriteObj.QuoteDoublequotes(PropertyTypeString, QuotePropertyTypeString);
Result += "\""; Result += QuotePropertyTypeString; Result += "\"";
if (i == e-1)
@@ -6720,8 +6718,9 @@ static void Write__extendedMethodTypes_initializer(RewriteModernObjC &RewriteObj
Result += "{\n";
for (unsigned i = 0, e = Methods.size(); i < e; i++) {
ObjCMethodDecl *MD = Methods[i];
- std::string MethodTypeString, QuoteMethodTypeString;
- Context->getObjCEncodingForMethodDecl(MD, MethodTypeString, true);
+ std::string MethodTypeString =
+ Context->getObjCEncodingForMethodDecl(MD, true);
+ std::string QuoteMethodTypeString;
RewriteObj.QuoteDoublequotes(MethodTypeString, QuoteMethodTypeString);
Result += "\t\""; Result += QuoteMethodTypeString; Result += "\"";
if (i == e-1)
@@ -7522,9 +7521,9 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
CDecl = CatDecl->getClassInterface();
std::string RecName = CDecl->getName();
RecName += "_IMPL";
- RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
- SourceLocation(), SourceLocation(),
- &Context->Idents.get(RecName.c_str()));
+ RecordDecl *RD = RecordDecl::Create(
+ *Context, TTK_Struct, TUDecl, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(RecName));
QualType PtrStructIMPL = Context->getPointerType(Context->getTagDeclType(RD));
unsigned UnsignedIntSize =
static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
diff --git a/lib/Frontend/Rewrite/RewriteObjC.cpp b/lib/Frontend/Rewrite/RewriteObjC.cpp
index 5967e40bfed9..e842e592cbbe 100644
--- a/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -4426,11 +4426,9 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// Initialize the block descriptor.
std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
- VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
- SourceLocation(), SourceLocation(),
- &Context->Idents.get(DescData.c_str()),
- Context->VoidPtrTy, nullptr,
- SC_Static);
+ VarDecl *NewVD = VarDecl::Create(
+ *Context, TUDecl, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(DescData), Context->VoidPtrTy, nullptr, SC_Static);
UnaryOperator *DescRefExpr =
new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
Context->VoidPtrTy,
@@ -5126,8 +5124,7 @@ void RewriteObjCFragileABI::RewriteObjCProtocolMetaData(
else
Result += "\t ,{(struct objc_selector *)\"";
Result += (*I)->getSelector().getAsString();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ std::string MethodTypeString = Context->getObjCEncodingForMethodDecl(*I);
Result += "\", \"";
Result += MethodTypeString;
Result += "\"}\n";
@@ -5164,8 +5161,7 @@ void RewriteObjCFragileABI::RewriteObjCProtocolMetaData(
else
Result += "\t ,{(struct objc_selector *)\"";
Result += (*I)->getSelector().getAsString();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ std::string MethodTypeString = Context->getObjCEncodingForMethodDecl(*I);
Result += "\", \"";
Result += MethodTypeString;
Result += "\"}\n";
@@ -5650,14 +5646,12 @@ void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *ID
InstanceMethods.push_back(Setter);
}
RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
- true, "CATEGORY_", FullCategoryName.c_str(),
- Result);
-
+ true, "CATEGORY_", FullCategoryName, Result);
+
// Build _objc_method_list for class's class methods if needed
RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
- false, "CATEGORY_", FullCategoryName.c_str(),
- Result);
-
+ false, "CATEGORY_", FullCategoryName, Result);
+
// Protocols referenced in class declaration?
// Null CDecl is case of a category implementation with no category interface
if (CDecl)
@@ -5776,9 +5770,9 @@ void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegi
Result += "{\n\t0, " + utostr(NumMethods) + "\n";
Result += "\t,{{(SEL)\"";
- Result += (*MethodBegin)->getSelector().getAsString().c_str();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += (*MethodBegin)->getSelector().getAsString();
+ std::string MethodTypeString =
+ Context->getObjCEncodingForMethodDecl(*MethodBegin);
Result += "\", \"";
Result += MethodTypeString;
Result += "\", (void *)";
@@ -5786,9 +5780,9 @@ void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegi
Result += "}\n";
for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
Result += "\t ,{(SEL)\"";
- Result += (*MethodBegin)->getSelector().getAsString().c_str();
- std::string MethodTypeString;
- Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += (*MethodBegin)->getSelector().getAsString();
+ std::string MethodTypeString =
+ Context->getObjCEncodingForMethodDecl(*MethodBegin);
Result += "\", \"";
Result += MethodTypeString;
Result += "\", (void *)";
diff --git a/lib/Frontend/Rewrite/RewriteTest.cpp b/lib/Frontend/Rewrite/RewriteTest.cpp
index 722c5e80b443..b0791f4cddd7 100644
--- a/lib/Frontend/Rewrite/RewriteTest.cpp
+++ b/lib/Frontend/Rewrite/RewriteTest.cpp
@@ -11,12 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Rewrite/Frontend/Rewriters.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Rewrite/Core/TokenRewriter.h"
+#include "clang/Rewrite/Frontend/Rewriters.h"
#include "llvm/Support/raw_ostream.h"
-void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) {
+void clang::DoRewriteTest(Preprocessor &PP, raw_ostream *OS) {
SourceManager &SM = PP.getSourceManager();
const LangOptions &LangOpts = PP.getLangOpts();
diff --git a/lib/Frontend/SerializedDiagnosticPrinter.cpp b/lib/Frontend/SerializedDiagnosticPrinter.cpp
index 5c42406876b6..1ea5a342e1d8 100644
--- a/lib/Frontend/SerializedDiagnosticPrinter.cpp
+++ b/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -10,9 +10,7 @@
#include "clang/Frontend/SerializedDiagnosticPrinter.h"
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticOptions.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Basic/Version.h"
#include "clang/Frontend/DiagnosticRenderer.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/SerializedDiagnosticReader.h"
@@ -25,7 +23,6 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
-#include <vector>
using namespace clang;
using namespace clang::serialized_diags;
@@ -439,7 +436,7 @@ static void AddRangeLocationAbbrev(llvm::BitCodeAbbrev *Abbrev) {
}
void SDiagsWriter::EmitBlockInfoBlock() {
- State->Stream.EnterBlockInfoBlock(3);
+ State->Stream.EnterBlockInfoBlock();
using namespace llvm;
llvm::BitstreamWriter &Stream = State->Stream;
diff --git a/lib/Frontend/SerializedDiagnosticReader.cpp b/lib/Frontend/SerializedDiagnosticReader.cpp
index 0ebbd22af274..c4461d452e7b 100644
--- a/lib/Frontend/SerializedDiagnosticReader.cpp
+++ b/lib/Frontend/SerializedDiagnosticReader.cpp
@@ -11,7 +11,6 @@
#include "clang/Basic/FileManager.h"
#include "clang/Frontend/SerializedDiagnostics.h"
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
using namespace clang;
using namespace clang::serialized_diags;
@@ -25,11 +24,8 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
if (!Buffer)
return SDError::CouldNotLoad;
- llvm::BitstreamReader StreamFile;
- StreamFile.init((const unsigned char *)(*Buffer)->getBufferStart(),
- (const unsigned char *)(*Buffer)->getBufferEnd());
-
- llvm::BitstreamCursor Stream(StreamFile);
+ llvm::BitstreamCursor Stream(**Buffer);
+ Optional<llvm::BitstreamBlockInfo> BlockInfo;
// Sniff for the signature.
if (Stream.Read(8) != 'D' ||
@@ -45,10 +41,13 @@ std::error_code SerializedDiagnosticReader::readDiagnostics(StringRef File) {
std::error_code EC;
switch (Stream.ReadSubBlockID()) {
- case llvm::bitc::BLOCKINFO_BLOCK_ID:
- if (Stream.ReadBlockInfoBlock())
+ case llvm::bitc::BLOCKINFO_BLOCK_ID: {
+ BlockInfo = Stream.ReadBlockInfoBlock();
+ if (!BlockInfo)
return SDError::MalformedBlockInfoBlock;
+ Stream.setBlockInfo(&*BlockInfo);
continue;
+ }
case BLOCK_META:
if ((EC = readMetaBlock(Stream)))
return EC;
@@ -251,7 +250,7 @@ SerializedDiagnosticReader::readDiagnosticBlock(llvm::BitstreamCursor &Stream) {
namespace {
class SDErrorCategoryType final : public std::error_category {
- const char *name() const LLVM_NOEXCEPT override {
+ const char *name() const noexcept override {
return "clang.serialized_diags";
}
std::string message(int IE) const override {
diff --git a/lib/Frontend/TextDiagnostic.cpp b/lib/Frontend/TextDiagnostic.cpp
index 977af079a77a..a4937386b93f 100644
--- a/lib/Frontend/TextDiagnostic.cpp
+++ b/lib/Frontend/TextDiagnostic.cpp
@@ -18,7 +18,7 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Locale.h"
-#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -119,16 +119,17 @@ printableTextForNextCharacter(StringRef SourceLine, size_t *i,
begin = reinterpret_cast<unsigned char const *>(&*(SourceLine.begin() + *i));
end = begin + (SourceLine.size() - *i);
- if (isLegalUTF8Sequence(begin, end)) {
- UTF32 c;
- UTF32 *cptr = &c;
+ if (llvm::isLegalUTF8Sequence(begin, end)) {
+ llvm::UTF32 c;
+ llvm::UTF32 *cptr = &c;
unsigned char const *original_begin = begin;
- unsigned char const *cp_end = begin+getNumBytesForUTF8(SourceLine[*i]);
+ unsigned char const *cp_end =
+ begin + llvm::getNumBytesForUTF8(SourceLine[*i]);
- ConversionResult res = ConvertUTF8toUTF32(&begin, cp_end, &cptr, cptr+1,
- strictConversion);
+ llvm::ConversionResult res = llvm::ConvertUTF8toUTF32(
+ &begin, cp_end, &cptr, cptr + 1, llvm::strictConversion);
(void)res;
- assert(conversionOK==res);
+ assert(llvm::conversionOK == res);
assert(0 < begin-original_begin
&& "we must be further along in the string now");
*i += begin-original_begin;
@@ -764,6 +765,22 @@ void TextDiagnostic::printDiagnosticMessage(raw_ostream &OS,
OS << '\n';
}
+void TextDiagnostic::emitFilename(StringRef Filename, const SourceManager &SM) {
+ SmallVector<char, 128> AbsoluteFilename;
+ if (DiagOpts->AbsolutePath) {
+ const DirectoryEntry *Dir = SM.getFileManager().getDirectory(
+ llvm::sys::path::parent_path(Filename));
+ if (Dir) {
+ StringRef DirName = SM.getFileManager().getCanonicalName(Dir);
+ llvm::sys::path::append(AbsoluteFilename, DirName,
+ llvm::sys::path::filename(Filename));
+ Filename = StringRef(AbsoluteFilename.data(), AbsoluteFilename.size());
+ }
+ }
+
+ OS << Filename;
+}
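emitFilename is gated on DiagOpts->AbsolutePath, i.e. the -fdiagnostics-absolute-paths driver flag; a hedged before/after sketch (paths hypothetical, diagnostic text elided):

  $ clang -c t.c
  t.c:3:5: error: ...
  $ clang -fdiagnostics-absolute-paths -c t.c
  /home/user/project/t.c:3:5: error: ...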
+
/// \brief Print out the file/line/column information and include trace.
///
/// This method handles the emission of the diagnostic location information.
@@ -780,7 +797,7 @@ void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
if (FID.isValid()) {
const FileEntry* FE = SM.getFileEntryForID(FID);
if (FE && FE->isValid()) {
- OS << FE->getName();
+ emitFilename(FE->getName(), SM);
if (FE->isInPCH())
OS << " (in PCH)";
OS << ": ";
@@ -796,7 +813,7 @@ void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
if (DiagOpts->ShowColors)
OS.changeColor(savedColor, true);
- OS << PLoc.getFilename();
+ emitFilename(PLoc.getFilename(), SM);
switch (DiagOpts->getFormat()) {
case DiagnosticOptions::Clang: OS << ':' << LineNo; break;
case DiagnosticOptions::MSVC: OS << '(' << LineNo; break;
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
index 66b46b7814eb..17646b48e23d 100644
--- a/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -13,13 +13,11 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Basic/DiagnosticOptions.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/TextDiagnostic.h"
#include "clang/Lex/Lexer.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
using namespace clang;
diff --git a/lib/Frontend/VerifyDiagnosticConsumer.cpp b/lib/Frontend/VerifyDiagnosticConsumer.cpp
index 7331d77d1c18..ae16ea177ffe 100644
--- a/lib/Frontend/VerifyDiagnosticConsumer.cpp
+++ b/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -43,7 +43,8 @@ VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
assert(!CurrentPreprocessor && "CurrentPreprocessor should be invalid!");
SrcManager = nullptr;
CheckDiagnostics();
- Diags.takeClient().release();
+ assert(!Diags.ownsClient() &&
+ "The VerifyDiagnosticConsumer takes over ownership of the client!");
}
#ifndef NDEBUG
diff --git a/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 509c326d1597..187a6e76245d 100644
--- a/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -52,7 +52,10 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
case EmitCodeGenOnly: return llvm::make_unique<EmitCodeGenOnlyAction>();
case EmitObj: return llvm::make_unique<EmitObjAction>();
case FixIt: return llvm::make_unique<FixItAction>();
- case GenerateModule: return llvm::make_unique<GenerateModuleAction>();
+ case GenerateModule:
+ return llvm::make_unique<GenerateModuleFromModuleMapAction>();
+ case GenerateModuleInterface:
+ return llvm::make_unique<GenerateModuleInterfaceAction>();
case GeneratePCH: return llvm::make_unique<GeneratePCHAction>();
case GeneratePTH: return llvm::make_unique<GeneratePTHAction>();
case InitOnly: return llvm::make_unique<InitOnlyAction>();
@@ -229,6 +232,11 @@ bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins);
return true;
}
+ if (Clang->getAnalyzerOpts()->ShowEnabledCheckerList) {
+ ento::printEnabledCheckerList(llvm::outs(),
+ Clang->getFrontendOpts().Plugins,
+ *Clang->getAnalyzerOpts());
+ }
#endif
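ShowEnabledCheckerList pairs with the cc1 option -analyzer-list-enabled-checkers (flag spelling is an assumption here; this hunk shows only the option's effect); a hedged sketch:

  clang -cc1 -analyze -analyzer-checker=core \
      -analyzer-list-enabled-checkers t.c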
// If there were errors in processing arguments, don't do anything else.
diff --git a/lib/Headers/CMakeLists.txt b/lib/Headers/CMakeLists.txt
index fa2d2107781b..efc4dd0971b6 100644
--- a/lib/Headers/CMakeLists.txt
+++ b/lib/Headers/CMakeLists.txt
@@ -3,6 +3,7 @@ set(files
altivec.h
ammintrin.h
arm_acle.h
+ armintr.h
avx2intrin.h
avx512bwintrin.h
avx512cdintrin.h
@@ -21,12 +22,13 @@ set(files
avxintrin.h
bmi2intrin.h
bmiintrin.h
+ __clang_cuda_builtin_vars.h
__clang_cuda_cmath.h
+ __clang_cuda_complex_builtins.h
__clang_cuda_intrinsics.h
__clang_cuda_math_forward_declares.h
__clang_cuda_runtime_wrapper.h
cpuid.h
- cuda_builtin_vars.h
clflushoptintrin.h
emmintrin.h
f16cintrin.h
@@ -88,6 +90,12 @@ set(files
xtestintrin.h
)
+set(cuda_wrapper_files
+ cuda_wrappers/algorithm
+ cuda_wrappers/complex
+ cuda_wrappers/new
+)
+
set(output_dir ${LLVM_LIBRARY_OUTPUT_INTDIR}/clang/${CLANG_VERSION}/include)
# Generate arm_neon.h
@@ -95,7 +103,7 @@ clang_tablegen(arm_neon.h -gen-arm-neon
SOURCE ${CLANG_SOURCE_DIR}/include/clang/Basic/arm_neon.td)
set(out_files)
-foreach( f ${files} )
+foreach( f ${files} ${cuda_wrapper_files} )
set( src ${CMAKE_CURRENT_SOURCE_DIR}/${f} )
set( dst ${output_dir}/${f} )
add_custom_command(OUTPUT ${dst}
@@ -120,6 +128,12 @@ install(
PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include)
+install(
+ FILES ${cuda_wrapper_files}
+ COMPONENT clang-headers
+ PERMISSIONS OWNER_READ OWNER_WRITE GROUP_READ WORLD_READ
+ DESTINATION lib${LLVM_LIBDIR_SUFFIX}/clang/${CLANG_VERSION}/include/cuda_wrappers)
+
if (NOT CMAKE_CONFIGURATION_TYPES) # don't add this for IDE's.
add_custom_target(install-clang-headers
DEPENDS clang-headers
diff --git a/lib/Headers/cuda_builtin_vars.h b/lib/Headers/__clang_cuda_builtin_vars.h
index 6f5eb9c78d85..6f5eb9c78d85 100644
--- a/lib/Headers/cuda_builtin_vars.h
+++ b/lib/Headers/__clang_cuda_builtin_vars.h
diff --git a/lib/Headers/__clang_cuda_cmath.h b/lib/Headers/__clang_cuda_cmath.h
index ae7ff2f8d306..0eaa08b30cab 100644
--- a/lib/Headers/__clang_cuda_cmath.h
+++ b/lib/Headers/__clang_cuda_cmath.h
@@ -26,13 +26,15 @@
#error "This file is for CUDA compilation only."
#endif
+#include <limits>
+
// CUDA lets us use various std math functions on the device side. This file
// works in concert with __clang_cuda_math_forward_declares.h to make this work.
//
// Specifically, the forward-declares header declares __device__ overloads for
// these functions in the global namespace, then pulls them into namespace std
// with 'using' statements. Then this file implements those functions, after
-// the implementations have been pulled in.
+// their implementations have been pulled in.
//
// It's important that we declare the functions in the global namespace and pull
// them into namespace std with using statements, as opposed to simply declaring
@@ -73,7 +75,10 @@ __DEVICE__ float frexp(float __arg, int *__exp) {
__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
-__DEVICE__ bool isfinite(double __x) { return ::__finite(__x); }
+// For inscrutable reasons, __finite(), the double-precision version of
+// __finitef, does not exist when compiling for MacOS. __isfinited is available
+// everywhere and is just as good.
+__DEVICE__ bool isfinite(double __x) { return ::__isfinited(__x); }
__DEVICE__ bool isgreater(float __x, float __y) {
return __builtin_isgreater(__x, __y);
}
@@ -120,12 +125,15 @@ __DEVICE__ float ldexp(float __arg, int __exp) {
__DEVICE__ float log(float __x) { return ::logf(__x); }
__DEVICE__ float log10(float __x) { return ::log10f(__x); }
__DEVICE__ float modf(float __x, float *__iptr) { return ::modff(__x, __iptr); }
-__DEVICE__ float nexttoward(float __from, float __to) {
+__DEVICE__ float nexttoward(float __from, double __to) {
return __builtin_nexttowardf(__from, __to);
}
__DEVICE__ double nexttoward(double __from, double __to) {
return __builtin_nexttoward(__from, __to);
}
+__DEVICE__ float nexttowardf(float __from, double __to) {
+ return __builtin_nexttowardf(__from, __to);
+}
__DEVICE__ float pow(float __base, float __exp) {
return ::powf(__base, __exp);
}
@@ -136,13 +144,338 @@ __DEVICE__ double pow(double __base, int __iexp) {
return ::powi(__base, __iexp);
}
__DEVICE__ bool signbit(float __x) { return ::__signbitf(__x); }
-__DEVICE__ bool signbit(double __x) { return ::__signbit(__x); }
+__DEVICE__ bool signbit(double __x) { return ::__signbitd(__x); }
__DEVICE__ float sin(float __x) { return ::sinf(__x); }
__DEVICE__ float sinh(float __x) { return ::sinhf(__x); }
__DEVICE__ float sqrt(float __x) { return ::sqrtf(__x); }
__DEVICE__ float tan(float __x) { return ::tanf(__x); }
__DEVICE__ float tanh(float __x) { return ::tanhf(__x); }
+// Now we've defined everything we promised we'd define in
+// __clang_cuda_math_forward_declares.h. We need to do two additional things to
+// fix up our math functions.
+//
+// 1) Define __device__ overloads for e.g. sin(int). The CUDA headers define
+// only sin(float) and sin(double), which means that e.g. sin(0) is
+// ambiguous.
+//
+// 2) Pull the __device__ overloads of "foobarf" math functions into namespace
+// std. These are defined in the CUDA headers in the global namespace,
+// independent of everything else we've done here.
+
+// We can't use std::enable_if, because we want to be pre-C++11 compatible. But
+// we go ahead and unconditionally define functions that are only available when
+// compiling for C++11 to match the behavior of the CUDA headers.
+template<bool __B, class __T = void>
+struct __clang_cuda_enable_if {};
+
+template <class __T> struct __clang_cuda_enable_if<true, __T> {
+ typedef __T type;
+};
+
+// Defines an overload of __fn that accepts one integral argument, calls
+// __fn((double)x), and returns __retty.
+#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(__retty, __fn) \
+ template <typename __T> \
+ __DEVICE__ \
+ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer, \
+ __retty>::type \
+ __fn(__T __x) { \
+ return ::__fn((double)__x); \
+ }
+
+// Defines an overload of __fn that accepts two arithmetic arguments, calls
+// __fn((double)x, (double)y), and returns __retty.
+//
+// Note this is different from OVERLOAD_1, which generates an overload that
+// accepts only *integral* arguments.
+#define __CUDA_CLANG_FN_INTEGER_OVERLOAD_2(__retty, __fn) \
+ template <typename __T1, typename __T2> \
+ __DEVICE__ typename __clang_cuda_enable_if< \
+ std::numeric_limits<__T1>::is_specialized && \
+ std::numeric_limits<__T2>::is_specialized, \
+ __retty>::type \
+ __fn(__T1 __x, __T2 __y) { \
+ return __fn((double)__x, (double)__y); \
+ }
+
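To make the first macro concrete: __CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin) expands, modulo whitespace, to the following (a mechanical expansion of the definition above, shown for illustration only):

  template <typename __T>
  __DEVICE__
  typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
                                  double>::type
  sin(__T __x) {
    return ::sin((double)__x);
  }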
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acos)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, acosh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asin)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, asinh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atan)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, atan2);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, atanh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cbrt)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, ceil)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, copysign);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cos)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, cosh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erf)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, erfc)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, exp2)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, expm1)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, fabs)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fdim);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, floor)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmax);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmin);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, fmod);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, fpclassify)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, hypot);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(int, ilogb)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isfinite)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreater);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isgreaterequal);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isinf);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isless);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessequal);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, islessgreater);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnan);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, isnormal)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(bool, isunordered);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, lgamma)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log10)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log1p)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, log2)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, logb)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llrint)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long long, llround)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lrint)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(long, lround)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, nearbyint);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, nextafter);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, pow);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_2(double, remainder);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, rint);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, round);
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(bool, signbit)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sin)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sinh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, sqrt)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tan)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tanh)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, tgamma)
+__CUDA_CLANG_FN_INTEGER_OVERLOAD_1(double, trunc);
+
+#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_1
+#undef __CUDA_CLANG_FN_INTEGER_OVERLOAD_2
+
+// Overloads for functions that don't match the patterns expected by
+// __CUDA_CLANG_FN_INTEGER_OVERLOAD_{1,2}.
+template <typename __T1, typename __T2, typename __T3>
+__DEVICE__ typename __clang_cuda_enable_if<
+ std::numeric_limits<__T1>::is_specialized &&
+ std::numeric_limits<__T2>::is_specialized &&
+ std::numeric_limits<__T3>::is_specialized,
+ double>::type
+fma(__T1 __x, __T2 __y, __T3 __z) {
+ return std::fma((double)__x, (double)__y, (double)__z);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+ double>::type
+frexp(__T __x, int *__exp) {
+ return std::frexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+ double>::type
+ldexp(__T __x, int __exp) {
+ return std::ldexp((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+ double>::type
+nexttoward(__T __from, double __to) {
+ return std::nexttoward((double)__from, __to);
+}
+
+template <typename __T1, typename __T2>
+__DEVICE__ typename __clang_cuda_enable_if<
+ std::numeric_limits<__T1>::is_specialized &&
+ std::numeric_limits<__T2>::is_specialized,
+ double>::type
+remquo(__T1 __x, __T2 __y, int *__quo) {
+ return std::remquo((double)__x, (double)__y, __quo);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+ double>::type
+scalbln(__T __x, long __exp) {
+ return std::scalbln((double)__x, __exp);
+}
+
+template <typename __T>
+__DEVICE__ typename __clang_cuda_enable_if<std::numeric_limits<__T>::is_integer,
+ double>::type
+scalbn(__T __x, int __exp) {
+ return std::scalbn((double)__x, __exp);
+}
+
+// We need to define these overloads in exactly the namespace our standard
+// library uses (including the right inline namespace), otherwise they won't be
+// picked up by other functions in the standard library (e.g. functions in
+// <complex>). Thus the ugliness below.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+// Pull the new overloads we defined above into namespace std.
+using ::acos;
+using ::acosh;
+using ::asin;
+using ::asinh;
+using ::atan;
+using ::atan2;
+using ::atanh;
+using ::cbrt;
+using ::ceil;
+using ::copysign;
+using ::cos;
+using ::cosh;
+using ::erf;
+using ::erfc;
+using ::exp;
+using ::exp2;
+using ::expm1;
+using ::fabs;
+using ::fdim;
+using ::floor;
+using ::fma;
+using ::fmax;
+using ::fmin;
+using ::fmod;
+using ::fpclassify;
+using ::frexp;
+using ::hypot;
+using ::ilogb;
+using ::isfinite;
+using ::isgreater;
+using ::isgreaterequal;
+using ::isless;
+using ::islessequal;
+using ::islessgreater;
+using ::isnormal;
+using ::isunordered;
+using ::ldexp;
+using ::lgamma;
+using ::llrint;
+using ::llround;
+using ::log;
+using ::log10;
+using ::log1p;
+using ::log2;
+using ::logb;
+using ::lrint;
+using ::lround;
+using ::nearbyint;
+using ::nextafter;
+using ::nexttoward;
+using ::pow;
+using ::remainder;
+using ::remquo;
+using ::rint;
+using ::round;
+using ::scalbln;
+using ::scalbn;
+using ::signbit;
+using ::sin;
+using ::sinh;
+using ::sqrt;
+using ::tan;
+using ::tanh;
+using ::tgamma;
+using ::trunc;
+
+// Well this is fun: We need to pull these symbols in for libc++, but we can't
+// pull them in with libstdc++, because its ::isinf and ::isnan are different
+// than its std::isinf and std::isnan.
+#ifndef __GLIBCXX__
+using ::isinf;
+using ::isnan;
+#endif
+
+// Finally, pull the "foobarf" functions that CUDA defines in its headers into
+// namespace std.
+using ::acosf;
+using ::acoshf;
+using ::asinf;
+using ::asinhf;
+using ::atan2f;
+using ::atanf;
+using ::atanhf;
+using ::cbrtf;
+using ::ceilf;
+using ::copysignf;
+using ::cosf;
+using ::coshf;
+using ::erfcf;
+using ::erff;
+using ::exp2f;
+using ::expf;
+using ::expm1f;
+using ::fabsf;
+using ::fdimf;
+using ::floorf;
+using ::fmaf;
+using ::fmaxf;
+using ::fminf;
+using ::fmodf;
+using ::frexpf;
+using ::hypotf;
+using ::ilogbf;
+using ::ldexpf;
+using ::lgammaf;
+using ::llrintf;
+using ::llroundf;
+using ::log10f;
+using ::log1pf;
+using ::log2f;
+using ::logbf;
+using ::logf;
+using ::lrintf;
+using ::lroundf;
+using ::modff;
+using ::nearbyintf;
+using ::nextafterf;
+using ::nexttowardf;
+using ::powf;
+using ::remainderf;
+using ::remquof;
+using ::rintf;
+using ::roundf;
+using ::scalblnf;
+using ::scalbnf;
+using ::sinf;
+using ::sinhf;
+using ::sqrtf;
+using ::tanf;
+using ::tanhf;
+using ::tgammaf;
+using ::truncf;
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+} // namespace std
+#endif
+
#undef __DEVICE__
#endif
diff --git a/lib/Headers/__clang_cuda_complex_builtins.h b/lib/Headers/__clang_cuda_complex_builtins.h
new file mode 100644
index 000000000000..beef7deff87f
--- /dev/null
+++ b/lib/Headers/__clang_cuda_complex_builtins.h
@@ -0,0 +1,203 @@
+/*===-- __clang_cuda_complex_builtins - CUDA impls of runtime complex fns ---===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_COMPLEX_BUILTINS
+#define __CLANG_CUDA_COMPLEX_BUILTINS
+
+// This header defines __muldc3, __mulsc3, __divdc3, and __divsc3. These are
+// libgcc functions that clang assumes are available when compiling C99 complex
+// operations. (These implementations come from libc++, and have been modified
+// to work with CUDA.)
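+//
+// For example, a full (C99 Annex G) complex multiplication of two
+// double _Complex values compiles to a __muldc3 call on the operands' real
+// and imaginary parts.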
+
+extern "C" inline __device__ double _Complex __muldc3(double __a, double __b,
+ double __c, double __d) {
+ double __ac = __a * __c;
+ double __bd = __b * __d;
+ double __ad = __a * __d;
+ double __bc = __b * __c;
+ double _Complex z;
+ __real__(z) = __ac - __bd;
+ __imag__(z) = __ad + __bc;
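+ // If the naive formula produced NaN in both parts, apply the C99 Annex G
+ // recovery path: reduce infinite operands to signed units, zero out NaNs,
+ // and recompute, so that e.g. infinity times a finite nonzero operand
+ // yields a correctly signed infinity.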
+ if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ int __recalc = 0;
+ if (std::isinf(__a) || std::isinf(__b)) {
+ __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
+ __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
+ if (std::isnan(__c))
+ __c = std::copysign(0, __c);
+ if (std::isnan(__d))
+ __d = std::copysign(0, __d);
+ __recalc = 1;
+ }
+ if (std::isinf(__c) || std::isinf(__d)) {
+ __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
+ __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
+ if (std::isnan(__a))
+ __a = std::copysign(0, __a);
+ if (std::isnan(__b))
+ __b = std::copysign(0, __b);
+ __recalc = 1;
+ }
+ if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
+ std::isinf(__ad) || std::isinf(__bc))) {
+ if (std::isnan(__a))
+ __a = std::copysign(0, __a);
+ if (std::isnan(__b))
+ __b = std::copysign(0, __b);
+ if (std::isnan(__c))
+ __c = std::copysign(0, __c);
+ if (std::isnan(__d))
+ __d = std::copysign(0, __d);
+ __recalc = 1;
+ }
+ if (__recalc) {
+ // Can't use std::numeric_limits<double>::infinity() -- that doesn't have
+ // a device overload (and isn't constexpr before C++11, naturally).
+ __real__(z) = __builtin_huge_val() * (__a * __c - __b * __d);
+ __imag__(z) = __builtin_huge_val() * (__a * __d + __b * __c);
+ }
+ }
+ return z;
+}
+
+extern "C" inline __device__ float _Complex __mulsc3(float __a, float __b,
+ float __c, float __d) {
+ float __ac = __a * __c;
+ float __bd = __b * __d;
+ float __ad = __a * __d;
+ float __bc = __b * __c;
+ float _Complex z;
+ __real__(z) = __ac - __bd;
+ __imag__(z) = __ad + __bc;
+ if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ int __recalc = 0;
+ if (std::isinf(__a) || std::isinf(__b)) {
+ __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
+ __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
+ if (std::isnan(__c))
+ __c = std::copysign(0, __c);
+ if (std::isnan(__d))
+ __d = std::copysign(0, __d);
+ __recalc = 1;
+ }
+ if (std::isinf(__c) || std::isinf(__d)) {
+ __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
+ __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
+ if (std::isnan(__a))
+ __a = std::copysign(0, __a);
+ if (std::isnan(__b))
+ __b = std::copysign(0, __b);
+ __recalc = 1;
+ }
+ if (!__recalc && (std::isinf(__ac) || std::isinf(__bd) ||
+ std::isinf(__ad) || std::isinf(__bc))) {
+ if (std::isnan(__a))
+ __a = std::copysign(0, __a);
+ if (std::isnan(__b))
+ __b = std::copysign(0, __b);
+ if (std::isnan(__c))
+ __c = std::copysign(0, __c);
+ if (std::isnan(__d))
+ __d = std::copysign(0, __d);
+ __recalc = 1;
+ }
+ if (__recalc) {
+ __real__(z) = __builtin_huge_valf() * (__a * __c - __b * __d);
+ __imag__(z) = __builtin_huge_valf() * (__a * __d + __b * __c);
+ }
+ }
+ return z;
+}
+
+extern "C" inline __device__ double _Complex __divdc3(double __a, double __b,
+ double __c, double __d) {
+ int __ilogbw = 0;
+ // Can't use std::max, because that's defined in <algorithm>, and we don't
+ // want to pull that in for every compile. The CUDA headers define
+ // ::max(float, float) and ::max(double, double), which is sufficient for us.
+ double __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
+ if (std::isfinite(__logbw)) {
+ __ilogbw = (int)__logbw;
+ __c = std::scalbn(__c, -__ilogbw);
+ __d = std::scalbn(__d, -__ilogbw);
+ }
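+ // Scaling __c + __d*i by 2^-__ilogbw keeps __denom from overflowing or
+ // underflowing; the same power of two is folded back into the quotient by
+ // the scalbn calls below.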
+ double __denom = __c * __c + __d * __d;
+ double _Complex z;
+ __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
+ __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ if ((__denom == 0.0) && (!std::isnan(__a) || !std::isnan(__b))) {
+ __real__(z) = std::copysign(__builtin_huge_val(), __c) * __a;
+ __imag__(z) = std::copysign(__builtin_huge_val(), __c) * __b;
+ } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
+ std::isfinite(__d)) {
+ __a = std::copysign(std::isinf(__a) ? 1.0 : 0.0, __a);
+ __b = std::copysign(std::isinf(__b) ? 1.0 : 0.0, __b);
+ __real__(z) = __builtin_huge_val() * (__a * __c + __b * __d);
+ __imag__(z) = __builtin_huge_val() * (__b * __c - __a * __d);
+ } else if (std::isinf(__logbw) && __logbw > 0.0 && std::isfinite(__a) &&
+ std::isfinite(__b)) {
+ __c = std::copysign(std::isinf(__c) ? 1.0 : 0.0, __c);
+ __d = std::copysign(std::isinf(__d) ? 1.0 : 0.0, __d);
+ __real__(z) = 0.0 * (__a * __c + __b * __d);
+ __imag__(z) = 0.0 * (__b * __c - __a * __d);
+ }
+ }
+ return z;
+}
+
+extern "C" inline __device__ float _Complex __divsc3(float __a, float __b,
+ float __c, float __d) {
+ int __ilogbw = 0;
+ float __logbw = std::logb(max(std::abs(__c), std::abs(__d)));
+ if (std::isfinite(__logbw)) {
+ __ilogbw = (int)__logbw;
+ __c = std::scalbn(__c, -__ilogbw);
+ __d = std::scalbn(__d, -__ilogbw);
+ }
+ float __denom = __c * __c + __d * __d;
+ float _Complex z;
+ __real__(z) = std::scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
+ __imag__(z) = std::scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (std::isnan(__real__(z)) && std::isnan(__imag__(z))) {
+ if ((__denom == 0) && (!std::isnan(__a) || !std::isnan(__b))) {
+ __real__(z) = std::copysign(__builtin_huge_valf(), __c) * __a;
+ __imag__(z) = std::copysign(__builtin_huge_valf(), __c) * __b;
+ } else if ((std::isinf(__a) || std::isinf(__b)) && std::isfinite(__c) &&
+ std::isfinite(__d)) {
+ __a = std::copysign(std::isinf(__a) ? 1 : 0, __a);
+ __b = std::copysign(std::isinf(__b) ? 1 : 0, __b);
+ __real__(z) = __builtin_huge_valf() * (__a * __c + __b * __d);
+ __imag__(z) = __builtin_huge_valf() * (__b * __c - __a * __d);
+ } else if (std::isinf(__logbw) && __logbw > 0 && std::isfinite(__a) &&
+ std::isfinite(__b)) {
+ __c = std::copysign(std::isinf(__c) ? 1 : 0, __c);
+ __d = std::copysign(std::isinf(__d) ? 1 : 0, __d);
+ __real__(z) = 0 * (__a * __c + __b * __d);
+ __imag__(z) = 0 * (__b * __c - __a * __d);
+ }
+ }
+ return z;
+}
+
+#endif // __CLANG_CUDA_COMPLEX_BUILTINS
diff --git a/lib/Headers/__clang_cuda_math_forward_declares.h b/lib/Headers/__clang_cuda_math_forward_declares.h
index 3f2834d95000..49c805151d65 100644
--- a/lib/Headers/__clang_cuda_math_forward_declares.h
+++ b/lib/Headers/__clang_cuda_math_forward_declares.h
@@ -140,6 +140,7 @@ __DEVICE__ long lrint(double);
__DEVICE__ long lrint(float);
__DEVICE__ long lround(double);
__DEVICE__ long lround(float);
+__DEVICE__ long long llround(float); // No llround(double).
__DEVICE__ double modf(double, double *);
__DEVICE__ float modf(float, float *);
__DEVICE__ double nan(const char *);
@@ -149,7 +150,8 @@ __DEVICE__ float nearbyint(float);
__DEVICE__ double nextafter(double, double);
__DEVICE__ float nextafter(float, float);
__DEVICE__ double nexttoward(double, double);
-__DEVICE__ float nexttoward(float, float);
+__DEVICE__ float nexttoward(float, double);
+__DEVICE__ float nexttowardf(float, double);
__DEVICE__ double pow(double, double);
__DEVICE__ double pow(double, int);
__DEVICE__ float pow(float, float);
@@ -183,7 +185,19 @@ __DEVICE__ float tgamma(float);
__DEVICE__ double trunc(double);
__DEVICE__ float trunc(float);
+// We need to define these overloads in exactly the namespace our standard
+// library uses (including the right inline namespace), otherwise they won't be
+// picked up by other functions in the standard library (e.g. functions in
+// <complex>). Thus the ugliness below.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
using ::abs;
using ::acos;
using ::acosh;
@@ -235,6 +249,7 @@ using ::log2;
using ::logb;
using ::lrint;
using ::lround;
+using ::llround;
using ::modf;
using ::nan;
using ::nanf;
@@ -256,7 +271,15 @@ using ::tan;
using ::tanh;
using ::tgamma;
using ::trunc;
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
} // namespace std
+#endif
#pragma pop_macro("__DEVICE__")
diff --git a/lib/Headers/__clang_cuda_runtime_wrapper.h b/lib/Headers/__clang_cuda_runtime_wrapper.h
index 6445f9b76b8f..205e15b40b5d 100644
--- a/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -62,7 +62,7 @@
#include "cuda.h"
#if !defined(CUDA_VERSION)
#error "cuda.h did not define CUDA_VERSION"
-#elif CUDA_VERSION < 7000 || CUDA_VERSION > 7050
+#elif CUDA_VERSION < 7000 || CUDA_VERSION > 8000
#error "Unsupported CUDA version!"
#endif
@@ -72,9 +72,9 @@
#define __CUDA_ARCH__ 350
#endif
-#include "cuda_builtin_vars.h"
+#include "__clang_cuda_builtin_vars.h"
-// No need for device_launch_parameters.h as cuda_builtin_vars.h above
+// No need for device_launch_parameters.h as __clang_cuda_builtin_vars.h above
// has taken care of builtin variables declared in the file.
#define __DEVICE_LAUNCH_PARAMETERS_H__
@@ -113,6 +113,7 @@
#undef __cxa_vec_ctor
#undef __cxa_vec_cctor
#undef __cxa_vec_dtor
+#undef __cxa_vec_new
#undef __cxa_vec_new2
#undef __cxa_vec_new3
#undef __cxa_vec_delete2
@@ -120,6 +121,15 @@
#undef __cxa_vec_delete3
#undef __cxa_pure_virtual
+// math_functions.hpp expects this host function to be defined on MacOS, but it
+// ends up not being there because of the games we play here. Just define it
+// ourselves; it's simple enough.
+#ifdef __APPLE__
+inline __host__ double __signbitd(double x) {
+ return std::signbit(x);
+}
+#endif
+
// We need decls for functions in CUDA's libdevice with __device__
// attribute only. Alas they come either as __host__ __device__ or
// with no attributes at all. To work around that, define __CUDA_RTC__
@@ -135,6 +145,21 @@
// the headers we're about to include.
#define __host__ UNEXPECTED_HOST_ATTRIBUTE
+// CUDA 8.0.41 relies on the values of __USE_FAST_MATH__ and __CUDA_PREC_DIV;
+// previous versions only checked whether they were defined. The
+// CU_DEVICE_INVALID macro is only defined in 8.0.41, so we use it here to
+// detect the switch.
+
+#if defined(CU_DEVICE_INVALID)
+#if !defined(__USE_FAST_MATH__)
+#define __USE_FAST_MATH__ 0
+#endif
+
+#if !defined(__CUDA_PREC_DIV)
+#define __CUDA_PREC_DIV 0
+#endif
+#endif
+
// device_functions.hpp and math_functions*.hpp use 'static
// __forceinline__' (with no __device__) for definitions of device
// functions. Temporarily redefine __forceinline__ to include
@@ -151,7 +176,7 @@
// slow divides), so we need to scope our define carefully here.
#pragma push_macro("__USE_FAST_MATH__")
#if defined(__CLANG_CUDA_APPROX_TRANSCENDENTALS__)
-#define __USE_FAST_MATH__
+#define __USE_FAST_MATH__ 1
#endif
#include "math_functions.hpp"
#pragma pop_macro("__USE_FAST_MATH__")
@@ -267,8 +292,8 @@ __device__ static inline void *malloc(size_t __size) {
}
} // namespace std
-// Out-of-line implementations from cuda_builtin_vars.h. These need to come
-// after we've pulled in the definition of uint3 and dim3.
+// Out-of-line implementations from __clang_cuda_builtin_vars.h. These need to
+// come after we've pulled in the definition of uint3 and dim3.
__device__ inline __cuda_builtin_threadIdx_t::operator uint3() const {
uint3 ret;
@@ -296,13 +321,14 @@ __device__ inline __cuda_builtin_gridDim_t::operator dim3() const {
#include <__clang_cuda_cmath.h>
#include <__clang_cuda_intrinsics.h>
+#include <__clang_cuda_complex_builtins.h>
// curand_mtgp32_kernel helpfully redeclares blockDim and threadIdx in host
// mode, giving them their "proper" types of dim3 and uint3. This is
-// incompatible with the types we give in cuda_builtin_vars.h. As as hack,
-// force-include the header (nvcc doesn't include it by default) but redefine
-// dim3 and uint3 to our builtin types. (Thankfully dim3 and uint3 are only
-// used here for the redeclarations of blockDim and threadIdx.)
+// incompatible with the types we give in __clang_cuda_builtin_vars.h. As a
+// hack, force-include the header (nvcc doesn't include it by default) but
+// redefine dim3 and uint3 to our builtin types. (Thankfully dim3 and uint3 are
+// only used here for the redeclarations of blockDim and threadIdx.)
#pragma push_macro("dim3")
#pragma push_macro("uint3")
#define dim3 __cuda_builtin_blockDim_t
diff --git a/lib/Headers/__wmmintrin_aes.h b/lib/Headers/__wmmintrin_aes.h
index 211518eb2884..3a2ee1b2ef2e 100644
--- a/lib/Headers/__wmmintrin_aes.h
+++ b/lib/Headers/__wmmintrin_aes.h
@@ -35,7 +35,7 @@
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VAESENC instruction.
+/// This intrinsic corresponds to the <c> VAESENC </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
@@ -55,7 +55,7 @@ _mm_aesenc_si128(__m128i __V, __m128i __R)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VAESENCLAST instruction.
+/// This intrinsic corresponds to the <c> VAESENCLAST </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
@@ -75,7 +75,7 @@ _mm_aesenclast_si128(__m128i __V, __m128i __R)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VAESDEC instruction.
+/// This intrinsic corresponds to the <c> VAESDEC </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
@@ -95,7 +95,7 @@ _mm_aesdec_si128(__m128i __V, __m128i __R)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VAESDECLAST instruction.
+/// This intrinsic corresponds to the <c> VAESDECLAST </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the state value.
@@ -114,7 +114,7 @@ _mm_aesdeclast_si128(__m128i __V, __m128i __R)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VAESIMC instruction.
+/// This intrinsic corresponds to the <c> VAESIMC </c> instruction.
///
/// \param __V
/// A 128-bit integer vector containing the expanded key.
@@ -136,7 +136,7 @@ _mm_aesimc_si128(__m128i __V)
/// __m128i _mm_aeskeygenassist_si128(__m128i C, const int R);
/// \endcode
///
-/// This intrinsic corresponds to the \c AESKEYGENASSIST instruction.
+/// This intrinsic corresponds to the <c> AESKEYGENASSIST </c> instruction.
///
/// \param C
/// A 128-bit integer vector that is used to generate the AES encryption key.
diff --git a/lib/Headers/__wmmintrin_pclmul.h b/lib/Headers/__wmmintrin_pclmul.h
index d4e073f40688..e9c6a9f6d415 100644
--- a/lib/Headers/__wmmintrin_pclmul.h
+++ b/lib/Headers/__wmmintrin_pclmul.h
@@ -34,7 +34,7 @@
/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPCLMULQDQ instruction.
+/// This intrinsic corresponds to the <c> VPCLMULQDQ </c> instruction.
///
/// \param __X
/// A 128-bit vector of [2 x i64] containing one of the source operands.
@@ -42,13 +42,12 @@
/// A 128-bit vector of [2 x i64] containing one of the source operands.
/// \param __I
/// An immediate value specifying which 64-bit values to select from the
-/// operands.
-/// Bit 0 is used to select a value from operand __X,
-/// and bit 4 is used to select a value from operand __Y:
-/// Bit[0]=0 indicates that bits[63:0] of operand __X are used.
-/// Bit[0]=1 indicates that bits[127:64] of operand __X are used.
-/// Bit[4]=0 indicates that bits[63:0] of operand __Y are used.
-/// Bit[4]=1 indicates that bits[127:64] of operand __Y are used.
+/// operands. Bit 0 is used to select a value from operand \a __X, and bit
+/// 4 is used to select a value from operand \a __Y: \n
+/// Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n
+/// Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n
+/// Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. \n
+/// Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used.
/// \returns The 128-bit integer vector containing the result of the carry-less
/// multiplication of the selected 64-bit values.
#define _mm_clmulepi64_si128(__X, __Y, __I) \
diff --git a/lib/Headers/altivec.h b/lib/Headers/altivec.h
index 74a1914ce83b..d1d1d8026325 100644
--- a/lib/Headers/altivec.h
+++ b/lib/Headers/altivec.h
@@ -34,8 +34,31 @@
#define __CR6_LT 2
#define __CR6_LT_REV 3
+/* Constants for vec_test_data_class */
+#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0)
+#define __VEC_CLASS_FP_SUBNORMAL_P (1 << 1)
+#define __VEC_CLASS_FP_SUBNORMAL (__VEC_CLASS_FP_SUBNORMAL_P | \
+ __VEC_CLASS_FP_SUBNORMAL_N)
+#define __VEC_CLASS_FP_ZERO_N (1 << 2)
+#define __VEC_CLASS_FP_ZERO_P (1 << 3)
+#define __VEC_CLASS_FP_ZERO (__VEC_CLASS_FP_ZERO_P | \
+ __VEC_CLASS_FP_ZERO_N)
+#define __VEC_CLASS_FP_INFINITY_N (1 << 4)
+#define __VEC_CLASS_FP_INFINITY_P (1 << 5)
+#define __VEC_CLASS_FP_INFINITY (__VEC_CLASS_FP_INFINITY_P | \
+ __VEC_CLASS_FP_INFINITY_N)
+#define __VEC_CLASS_FP_NAN (1 << 6)
+#define __VEC_CLASS_FP_NOT_NORMAL (__VEC_CLASS_FP_NAN | \
+ __VEC_CLASS_FP_SUBNORMAL | \
+ __VEC_CLASS_FP_ZERO | \
+ __VEC_CLASS_FP_INFINITY)
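+/* These masks may be ORed together; e.g. passing
+ (__VEC_CLASS_FP_NAN | __VEC_CLASS_FP_INFINITY) to vec_test_data_class
+ selects lanes that are either NaN or infinite. */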
+
#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+#ifdef __POWER9_VECTOR__
+#include <stddef.h>
+#endif
+
static __inline__ vector signed char __ATTRS_o_ai vec_perm(
vector signed char __a, vector signed char __b, vector unsigned char __c);
@@ -134,7 +157,7 @@ static __inline__ vector float __ATTRS_o_ai vec_abs(vector float __a) {
#endif
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __VSX__
static __inline__ vector double __ATTRS_o_ai vec_abs(vector double __a) {
return __builtin_vsx_xvabsdp(__a);
}
@@ -163,6 +186,26 @@ vec_abss(vector signed int __a) {
__a, __builtin_altivec_vsubsws((vector signed int)(0), __a));
}
+/* vec_absd */
+#if defined(__POWER9_VECTOR__)
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_absd(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vabsdub(__a, __b);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_absd(vector unsigned short __a, vector unsigned short __b) {
+ return __builtin_altivec_vabsduh(__a, __b);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_absd(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_altivec_vabsduw(__a, __b);
+}
+
+#endif /* End __POWER9_VECTOR__ */
+
/* vec_add */
static __inline__ vector signed char __ATTRS_o_ai
@@ -305,6 +348,22 @@ vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
}
#endif
+static __inline__ vector signed int __ATTRS_o_ai
+vec_adde(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ vector signed int __mask = {1, 1, 1, 1};
+ vector signed int __carry = __c & __mask;
+ return vec_add(vec_add(__a, __b), __carry);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_adde(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ vector unsigned int __mask = {1, 1, 1, 1};
+ vector unsigned int __carry = __c & __mask;
+ return vec_add(vec_add(__a, __b), __carry);
+}
+
/* vec_addec */
#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
@@ -319,6 +378,50 @@ vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
return __builtin_altivec_vaddecuq(__a, __b, __c);
}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_addec(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+
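+ /* Widen each 32-bit lane to 64 bits, add the two lanes plus the masked
+ carry-in, and take bit 32 of the sum as that lane's carry-out. */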
+ signed int __result[4];
+ for (int __i = 0; __i < 4; __i++) {
+ unsigned int __tempa = (unsigned int) __a[__i];
+ unsigned int __tempb = (unsigned int) __b[__i];
+ unsigned int __tempc = (unsigned int) __c[__i];
+ __tempc = __tempc & 0x00000001;
+ unsigned long long __longa = (unsigned long long) __tempa;
+ unsigned long long __longb = (unsigned long long) __tempb;
+ unsigned long long __longc = (unsigned long long) __tempc;
+ unsigned long long __sum = __longa + __longb + __longc;
+ unsigned long long __res = (__sum >> 32) & 0x01;
+ unsigned long long __tempres = (unsigned int) __res;
+ __result[__i] = (signed int) __tempres;
+ }
+
+ vector signed int __ret = { __result[0], __result[1], __result[2], __result[3] };
+ return __ret;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_addec(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+
+ unsigned int __result[4];
+ for (int __i = 0; __i < 4; __i++) {
+ unsigned int __tempc = __c[__i] & 1;
+ unsigned long long __longa = (unsigned long long) __a[__i];
+ unsigned long long __longb = (unsigned long long) __b[__i];
+ unsigned long long __longc = (unsigned long long) __tempc;
+ unsigned long long __sum = __longa + __longb + __longc;
+ unsigned long long __res = (__sum >> 32) & 0x01;
+ unsigned long long __tempres = (unsigned int) __res;
+ __result[__i] = (unsigned int) __tempres;
+ }
+
+ vector unsigned int __ret = { __result[0], __result[1], __result[2], __result[3] };
+ return __ret;
+}
+
#endif
/* vec_vaddubm */
@@ -1544,6 +1647,12 @@ vec_cmpeq(vector unsigned char __a, vector unsigned char __b) {
(vector char)__b);
}
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpeq(vector bool char __a, vector bool char __b) {
+ return (vector bool char)__builtin_altivec_vcmpequb((vector char)__a,
+ (vector char)__b);
+}
+
static __inline__ vector bool short __ATTRS_o_ai vec_cmpeq(vector short __a,
vector short __b) {
return (vector bool short)__builtin_altivec_vcmpequh(__a, __b);
@@ -1555,6 +1664,12 @@ vec_cmpeq(vector unsigned short __a, vector unsigned short __b) {
(vector short)__b);
}
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpeq(vector bool short __a, vector bool short __b) {
+ return (vector bool short)__builtin_altivec_vcmpequh((vector short)__a,
+ (vector short)__b);
+}
+
static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector int __a,
vector int __b) {
return (vector bool int)__builtin_altivec_vcmpequw(__a, __b);
@@ -1566,6 +1681,12 @@ vec_cmpeq(vector unsigned int __a, vector unsigned int __b) {
(vector int)__b);
}
+static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector bool int __a,
+ vector bool int __b) {
+ return (vector bool int)__builtin_altivec_vcmpequw((vector int)__a,
+ (vector int)__b);
+}
+
#ifdef __POWER8_VECTOR__
static __inline__ vector bool long long __ATTRS_o_ai
vec_cmpeq(vector signed long long __a, vector signed long long __b) {
@@ -1577,6 +1698,13 @@ vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
return (vector bool long long)__builtin_altivec_vcmpequd(
(vector long long)__a, (vector long long)__b);
}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)__builtin_altivec_vcmpequd(
+ (vector long long)__a, (vector long long)__b);
+}
+
#endif
static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
@@ -1595,6 +1723,199 @@ vec_cmpeq(vector double __a, vector double __b) {
}
#endif
+#ifdef __POWER9_VECTOR__
+/* vec_cmpne */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector bool char __a, vector bool char __b) {
+ return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector signed char __a, vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpne(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpneb((vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector bool short __a, vector bool short __b) {
+ return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector signed short __a, vector signed short __b) {
+ return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpne(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpneh((vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector bool int __a, vector bool int __b) {
+ return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector signed int __a, vector signed int __b) {
+ return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
+ (vector int)__b);
+}
+
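+/* ISA 3.0 provides vcmpneb/vcmpneh/vcmpnew but no doubleword form, so the
+ 64-bit and double variants below negate vcmpequd instead. */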
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpne(vector float __a, vector float __b) {
+ return (vector bool int)__builtin_altivec_vcmpnew((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector double __a, vector double __b) {
+ return (vector bool long long)
+ ~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
+}
+
+/* vec_cmpnez */
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpnez(vector signed char __a, vector signed char __b) {
+ return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ vector bool char __ATTRS_o_ai
+vec_cmpnez(vector unsigned char __a, vector unsigned char __b) {
+ return (vector bool char)__builtin_altivec_vcmpnezb((vector char)__a,
+ (vector char)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpnez(vector signed short __a, vector signed short __b) {
+ return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_cmpnez(vector unsigned short __a, vector unsigned short __b) {
+ return (vector bool short)__builtin_altivec_vcmpnezh((vector short)__a,
+ (vector short)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpnez(vector signed int __a, vector signed int __b) {
+ return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_cmpnez(vector unsigned int __a, vector unsigned int __b) {
+ return (vector bool int)__builtin_altivec_vcmpnezw((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ signed int __ATTRS_o_ai
+vec_cntlz_lsbb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vctzlsbb(__a);
+#else
+ return __builtin_altivec_vclzlsbb(__a);
+#endif
+}
+
+static __inline__ signed int __ATTRS_o_ai
+vec_cntlz_lsbb(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vctzlsbb(__a);
+#else
+ return __builtin_altivec_vclzlsbb(__a);
+#endif
+}
+
+static __inline__ signed int __ATTRS_o_ai
+vec_cnttz_lsbb(vector signed char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclzlsbb(__a);
+#else
+ return __builtin_altivec_vctzlsbb(__a);
+#endif
+}
+
+static __inline__ signed int __ATTRS_o_ai
+vec_cnttz_lsbb(vector unsigned char __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_altivec_vclzlsbb(__a);
+#else
+ return __builtin_altivec_vctzlsbb(__a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_parity_lsbb(vector unsigned int __a) {
+ return __builtin_altivec_vprtybw(__a);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_parity_lsbb(vector signed int __a) {
+ return __builtin_altivec_vprtybw(__a);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_parity_lsbb(vector unsigned __int128 __a) {
+ return __builtin_altivec_vprtybq(__a);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_parity_lsbb(vector signed __int128 __a) {
+ return __builtin_altivec_vprtybq(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_parity_lsbb(vector unsigned long long __a) {
+ return __builtin_altivec_vprtybd(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_parity_lsbb(vector signed long long __a) {
+ return __builtin_altivec_vprtybd(__a);
+}
+
+#endif
+
/* vec_cmpgt */
static __inline__ vector bool char __ATTRS_o_ai
@@ -1882,6 +2203,41 @@ vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
return vec_cmpgt(__b, __a);
}
+/* vec_popcnt */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_popcnt(vector signed char __a) {
+ return __builtin_altivec_vpopcntb(__a);
+}
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_popcnt(vector unsigned char __a) {
+ return __builtin_altivec_vpopcntb(__a);
+}
+static __inline__ vector signed short __ATTRS_o_ai
+vec_popcnt(vector signed short __a) {
+ return __builtin_altivec_vpopcnth(__a);
+}
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_popcnt(vector unsigned short __a) {
+ return __builtin_altivec_vpopcnth(__a);
+}
+static __inline__ vector signed int __ATTRS_o_ai
+vec_popcnt(vector signed int __a) {
+ return __builtin_altivec_vpopcntw(__a);
+}
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_popcnt(vector unsigned int __a) {
+ return __builtin_altivec_vpopcntw(__a);
+}
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_popcnt(vector signed long long __a) {
+ return __builtin_altivec_vpopcntd(__a);
+}
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_popcnt(vector unsigned long long __a) {
+ return __builtin_altivec_vpopcntd(__a);
+}
+
/* vec_cntlz */
static __inline__ vector signed char __ATTRS_o_ai
@@ -1918,6 +2274,603 @@ vec_cntlz(vector unsigned long long __a) {
}
#endif
+#ifdef __POWER9_VECTOR__
+
+/* vec_cnttz */
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_cnttz(vector signed char __a) {
+ return __builtin_altivec_vctzb(__a);
+}
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_cnttz(vector unsigned char __a) {
+ return __builtin_altivec_vctzb(__a);
+}
+static __inline__ vector signed short __ATTRS_o_ai
+vec_cnttz(vector signed short __a) {
+ return __builtin_altivec_vctzh(__a);
+}
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_cnttz(vector unsigned short __a) {
+ return __builtin_altivec_vctzh(__a);
+}
+static __inline__ vector signed int __ATTRS_o_ai
+vec_cnttz(vector signed int __a) {
+ return __builtin_altivec_vctzw(__a);
+}
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_cnttz(vector unsigned int __a) {
+ return __builtin_altivec_vctzw(__a);
+}
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_cnttz(vector signed long long __a) {
+ return __builtin_altivec_vctzd(__a);
+}
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_cnttz(vector unsigned long long __a) {
+ return __builtin_altivec_vctzd(__a);
+}
+
+/* vec_first_match_index */
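+/* The 16-byte equality mask is scanned as two 64-bit halves: count the
+ trailing (little-endian) or leading (big-endian) zero bits before the first
+ match, then shift by log2 of the element width in bits (>> 3 for bytes,
+ >> 4 for halfwords, >> 5 for words) to convert the bit offset into an
+ element index. */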
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_index(vector signed char __a, vector signed char __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_index(vector unsigned char __a, vector unsigned char __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_index(vector signed short __a, vector signed short __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_index(vector unsigned short __a, vector unsigned short __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_index(vector signed int __a, vector signed int __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_index(vector unsigned int __a, vector unsigned int __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpeq(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpeq(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+/* vec_first_match_or_eos_index */
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_or_eos_index(vector signed char __a, vector signed char __b) {
+ /* OR the equality mask with comparisons of that mask against each input:
+ where an input element is zero (end of string) and the inputs differ,
+ the all-zero mask lane compares equal to that zero element, so
+ end-of-string lanes are flagged along with genuine matches. */
+ vector bool char __tmp1 = vec_cmpeq(__a, __b);
+ vector bool char __tmp2 = __tmp1 |
+ vec_cmpeq((vector signed char)__tmp1, __a) |
+ vec_cmpeq((vector signed char)__tmp1, __b);
+
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)__tmp2);
+#else
+ vec_cntlz((vector unsigned long long)__tmp2);
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_or_eos_index(vector unsigned char __a,
+ vector unsigned char __b) {
+ vector bool char __tmp1 = vec_cmpeq(__a, __b);
+ vector bool char __tmp2 = __tmp1 |
+ vec_cmpeq((vector unsigned char)__tmp1, __a) |
+ vec_cmpeq((vector unsigned char)__tmp1, __b);
+
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)__tmp2);
+#else
+ vec_cntlz((vector unsigned long long)__tmp2);
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_or_eos_index(vector signed short __a, vector signed short __b) {
+ vector bool short __tmp1 = vec_cmpeq(__a, __b);
+ vector bool short __tmp2 = __tmp1 |
+ vec_cmpeq((vector signed short)__tmp1, __a) |
+ vec_cmpeq((vector signed short)__tmp1, __b);
+
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)__tmp2);
+#else
+ vec_cntlz((vector unsigned long long)__tmp2);
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_or_eos_index(vector unsigned short __a,
+ vector unsigned short __b) {
+ vector bool short __tmp1 = vec_cmpeq(__a, __b);
+ vector bool short __tmp2 = __tmp1 |
+ vec_cmpeq((vector unsigned short)__tmp1, __a) |
+ vec_cmpeq((vector unsigned short)__tmp1, __b);
+
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)__tmp2);
+#else
+ vec_cntlz((vector unsigned long long)__tmp2);
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_or_eos_index(vector signed int __a, vector signed int __b) {
+ vector bool int __tmp1 = vec_cmpeq(__a, __b);
+ vector bool int __tmp2 = __tmp1 | vec_cmpeq((vector signed int)__tmp1, __a) |
+ vec_cmpeq((vector signed int)__tmp1, __b);
+
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)__tmp2);
+#else
+ vec_cntlz((vector unsigned long long)__tmp2);
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_match_or_eos_index(vector unsigned int __a, vector unsigned int __b) {
+ vector bool int __tmp1 = vec_cmpeq(__a, __b);
+ vector bool int __tmp2 = __tmp1 |
+ vec_cmpeq((vector unsigned int)__tmp1, __a) |
+ vec_cmpeq((vector unsigned int)__tmp1, __b);
+
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)__tmp2);
+#else
+ vec_cntlz((vector unsigned long long)__tmp2);
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+/* vec_first_mismatch_index */
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_index(vector signed char __a, vector signed char __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_index(vector unsigned char __a, vector unsigned char __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_index(vector signed short __a, vector signed short __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_index(vector unsigned short __a, vector unsigned short __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_index(vector signed int __a, vector signed int __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_index(vector unsigned int __a, vector unsigned int __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpne(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpne(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+/* vec_first_mismatch_or_eos_index */
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_or_eos_index(vector signed char __a,
+ vector signed char __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_or_eos_index(vector unsigned char __a,
+ vector unsigned char __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 3;
+ }
+ return __res[0] >> 3;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_or_eos_index(vector signed short __a,
+ vector signed short __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_or_eos_index(vector unsigned short __a,
+ vector unsigned short __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 4;
+ }
+ return __res[0] >> 4;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_or_eos_index(vector signed int __a, vector signed int __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+static __inline__ unsigned __ATTRS_o_ai
+vec_first_mismatch_or_eos_index(vector unsigned int __a,
+ vector unsigned int __b) {
+ vector unsigned long long __res =
+#ifdef __LITTLE_ENDIAN__
+ vec_cnttz((vector unsigned long long)vec_cmpnez(__a, __b));
+#else
+ vec_cntlz((vector unsigned long long)vec_cmpnez(__a, __b));
+#endif
+ if (__res[0] == 64) {
+ return (__res[1] + 64) >> 5;
+ }
+ return __res[0] >> 5;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_insert_exp(vector double __a, vector unsigned long long __b) {
+ return __builtin_vsx_xviexpdp((vector unsigned long long)__a, __b);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_insert_exp(vector unsigned long long __a, vector unsigned long long __b) {
+ return __builtin_vsx_xviexpdp(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_insert_exp(vector float __a, vector unsigned int __b) {
+ return __builtin_vsx_xviexpsp((vector unsigned int)__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_insert_exp(vector unsigned int __a, vector unsigned int __b) {
+ return __builtin_vsx_xviexpsp(__a, __b);
+}
+
+#if defined(__powerpc64__)
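+/* The ISA 3.0 lxvl/stxvl instructions expect the byte count in the
+ high-order byte of the length register, hence the (__b << 56) shifts
+ below. */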
+static __inline__ vector signed char __ATTRS_o_ai vec_xl_len(signed char *__a,
+ size_t __b) {
+ return (vector signed char)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xl_len(unsigned char *__a, size_t __b) {
+ return (vector unsigned char)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed short __ATTRS_o_ai vec_xl_len(signed short *__a,
+ size_t __b) {
+ return (vector signed short)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xl_len(unsigned short *__a, size_t __b) {
+ return (vector unsigned short)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai vec_xl_len(signed int *__a,
+ size_t __b) {
+ return (vector signed int)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_xl_len(unsigned int *__a,
+ size_t __b) {
+ return (vector unsigned int)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_xl_len(float *__a, size_t __b) {
+ return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_xl_len(signed __int128 *__a, size_t __b) {
+ return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_len(unsigned __int128 *__a, size_t __b) {
+ return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xl_len(signed long long *__a, size_t __b) {
+ return (vector signed long long)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xl_len(unsigned long long *__a, size_t __b) {
+ return (vector unsigned long long)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_xl_len(double *__a,
+ size_t __b) {
+ return (vector double)__builtin_vsx_lxvl(__a, (__b << 56));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xl_len_r(unsigned char *__a, size_t __b) {
+ vector unsigned char __res =
+ (vector unsigned char)__builtin_vsx_lxvll(__a, (__b << 56));
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __mask =
+ (vector unsigned char)__builtin_altivec_lvsr(16 - __b, (int *)NULL);
+ __res = (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__res, (vector int)__res, __mask);
+#endif
+ return __res;
+}
+
+// vec_xst_len
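+// These stores write only __c bytes of the source vector rather than all 16;
+// e.g. vec_xst_len(__a, __b, 7) stores seven bytes of __a to __b.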
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned char __a,
+ unsigned char *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed char __a,
+ signed char *__b, size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed short __a,
+ signed short *__b, size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned short __a,
+ unsigned short *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed int __a,
+ signed int *__b, size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned int __a,
+ unsigned int *__b, size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a,
+ signed __int128 *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a,
+ unsigned __int128 *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a,
+ signed long long *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned long long __a,
+ unsigned long long *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len(vector double __a, double *__b,
+ size_t __c) {
+ return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
+ unsigned char *__b,
+ size_t __c) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned char __mask =
+ (vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
+ vector unsigned char __res =
+ __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
+ return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
+#else
+ return __builtin_vsx_stxvll((vector int)__a, __b, (__c << 56));
+#endif
+}
+#endif
+#endif
+
/* vec_cpsgn */
#ifdef __VSX__
@@ -2016,20 +2969,284 @@ vec_vctuxs(vector float __a, int __b) {
return __builtin_altivec_vctuxs(__a, __b);
}
+/* vec_signed */
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sld(vector signed int, vector signed int, const unsigned int __c);
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signed(vector float __a) {
+ return __builtin_convertvector(__a, vector signed int);
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_signed(vector double __a) {
+ return __builtin_convertvector(__a, vector signed long long);
+}
+
+static __inline__ vector signed int __attribute__((__always_inline__))
+vec_signed2(vector double __a, vector double __b) {
+ return (vector signed int) { __a[0], __a[1], __b[0], __b[1] };
+}
+
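+/* xvcvdpsxws leaves its two results in the even word slots (big-endian
+ numbering); on little-endian targets the vec_sld rotate below realigns
+ them so the "even"/"odd" variants index elements in program order. */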
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signede(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvdpsxws(__a);
+#endif
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_signedo(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvdpsxws(__a);
+#else
+ vector signed int __ret = __builtin_vsx_xvcvdpsxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+#endif
+
+/* vec_unsigned */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sld(vector unsigned int, vector unsigned int, const unsigned int __c);
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unsigned(vector float __a) {
+ return __builtin_convertvector(__a, vector unsigned int);
+}
+
+#ifdef __VSX__
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_unsigned(vector double __a) {
+ return __builtin_convertvector(__a, vector unsigned long long);
+}
+
+static __inline__ vector unsigned int __attribute__((__always_inline__))
+vec_unsigned2(vector double __a, vector double __b) {
+ return (vector unsigned int) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unsignede(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvdpuxws(__a);
+#endif
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_unsignedo(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvdpuxws(__a);
+#else
+ vector unsigned int __ret = __builtin_vsx_xvcvdpuxws(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+#endif
+
+/* vec_float */
+
+static __inline__ vector float __ATTRS_o_ai
+vec_sld(vector float, vector float, const unsigned int __c);
+
+static __inline__ vector float __ATTRS_o_ai
+vec_float(vector signed int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_float(vector unsigned int __a) {
+ return __builtin_convertvector(__a, vector float);
+}
+
+#ifdef __VSX__
+static __inline__ vector float __ATTRS_o_ai
+vec_float2(vector signed long long __a, vector signed long long __b) {
+ return (vector float) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_float2(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector float) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_float2(vector double __a, vector double __b) {
+ return (vector float) { __a[0], __a[1], __b[0], __b[1] };
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floate(vector signed long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvsxdsp(__a);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floate(vector unsigned long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvuxdsp(__a);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floate(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ vector float __ret = __builtin_vsx_xvcvdpsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#else
+ return __builtin_vsx_xvcvdpsp(__a);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floato(vector signed long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvsxdsp(__a);
+#else
+ vector float __ret = __builtin_vsx_xvcvsxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floato(vector unsigned long long __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvuxdsp(__a);
+#else
+ vector float __ret = __builtin_vsx_xvcvuxdsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_floato(vector double __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvdpsp(__a);
+#else
+ vector float __ret = __builtin_vsx_xvcvdpsp(__a);
+ return vec_sld(__ret, __ret, 12);
+#endif
+}
+#endif
+
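vec_float2 narrows the elements of two doubleword vectors into one four-element float vector; the floate/floato variants convert a single vector into the even or odd word slots, rotating on little-endian targets. A sketch (helper name illustrative), assuming a VSX target:

#include <altivec.h>

vector float narrow(vector double lo, vector double hi) {
  /* {1.0, 2.0} and {3.0, 4.0} -> {1.0f, 2.0f, 3.0f, 4.0f} */
  return vec_float2(lo, hi);
}
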
/* vec_double */
#ifdef __VSX__
static __inline__ vector double __ATTRS_o_ai
vec_double(vector signed long long __a) {
+ return __builtin_convertvector(__a, vector double);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_double(vector unsigned long long __a) {
+ return __builtin_convertvector(__a, vector double);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublee(vector signed int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
+#else
+ return __builtin_vsx_xvcvsxwdp(__a);
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublee(vector unsigned int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
+#else
+ return __builtin_vsx_xvcvuxwdp(__a);
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublee(vector float __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
+#else
+ return __builtin_vsx_xvcvspdp(__a);
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleh(vector signed int __a) {
vector double __ret = {__a[0], __a[1]};
return __ret;
}
static __inline__ vector double __ATTRS_o_ai
-vec_double(vector unsigned long long __a) {
+vec_doubleh(vector unsigned int __a) {
vector double __ret = {__a[0], __a[1]};
return __ret;
}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleh(vector float __a) {
+ vector double __ret = {__a[0], __a[1]};
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublel(vector signed int __a) {
+ vector double __ret = {__a[2], __a[3]};
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublel(vector unsigned int __a) {
+ vector double __ret = {__a[2], __a[3]};
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doublel(vector float __a) {
+ vector double __ret = {__a[2], __a[3]};
+ return __ret;
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleo(vector signed int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvsxwdp(__a);
+#else
+ return __builtin_vsx_xvcvsxwdp(vec_sld(__a, __a, 4));
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleo(vector unsigned int __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvuxwdp(__a);
+#else
+ return __builtin_vsx_xvcvuxwdp(vec_sld(__a, __a, 4));
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_doubleo(vector float __a) {
+#ifdef __LITTLE_ENDIAN__
+ return __builtin_vsx_xvcvspdp(__a);
+#else
+ return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
+#endif
+}
#endif
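The suffix picks the source elements: doublee/doubleo widen the even or odd word slots (with the endian correction shown above), while doubleh/doublel widen the high and low halves of the vector. A sketch (helper name illustrative), assuming VSX:

#include <altivec.h>

void widen(vector float f, vector double *hi, vector double *lo) {
  /* {1, 2, 3, 4} -> *hi = {1.0, 2.0}, *lo = {3.0, 4.0} */
  *hi = vec_doubleh(f);
  *lo = vec_doublel(f);
}
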
/* vec_div */
@@ -3835,6 +5052,34 @@ vec_mergee(vector unsigned int __a, vector unsigned int __b) {
0x18, 0x19, 0x1A, 0x1B));
}
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_mergee(vector bool long long __a, vector bool long long __b) {
+ return vec_mergeh(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergee(vector signed long long __a, vector signed long long __b) {
+ return vec_mergeh(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergee(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_mergeh(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_mergee(vector float __a, vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x00, 0x01, 0x02, 0x03, 0x10, 0x11,
+ 0x12, 0x13, 0x08, 0x09, 0x0A, 0x0B,
+ 0x18, 0x19, 0x1A, 0x1B));
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_mergee(vector double __a, vector double __b) {
+ return vec_mergeh(__a, __b);
+}
+
/* vec_mergeo */
static __inline__ vector bool int __ATTRS_o_ai vec_mergeo(vector bool int __a,
@@ -3861,6 +5106,34 @@ vec_mergeo(vector unsigned int __a, vector unsigned int __b) {
0x1C, 0x1D, 0x1E, 0x1F));
}
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_mergeo(vector bool long long __a, vector bool long long __b) {
+ return vec_mergel(__a, __b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_mergeo(vector signed long long __a, vector signed long long __b) {
+ return vec_mergel(__a, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_mergeo(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_mergel(__a, __b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_mergeo(vector float __a, vector float __b) {
+ return vec_perm(__a, __b,
+ (vector unsigned char)(0x04, 0x05, 0x06, 0x07, 0x14, 0x15,
+ 0x16, 0x17, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_mergeo(vector double __a, vector double __b) {
+ return vec_mergel(__a, __b);
+}
+
#endif
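For doubleword element types there is no even/odd interleave distinct from the high/low merge, so those overloads forward to vec_mergeh/vec_mergel; the float overloads spell out the word interleave with vec_perm. A sketch of the word-element behaviour (helper name illustrative):

#include <altivec.h>

vector float interleave_even(vector float a, vector float b) {
  /* {a0,a1,a2,a3}, {b0,b1,b2,b3} -> {a0, b0, a2, b2} */
  return vec_mergee(a, b);
}
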
/* vec_mfvscr */
@@ -4689,6 +5962,12 @@ static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
return ~(__a & __b);
}
+static __inline__ vector float __ATTRS_o_ai
+vec_nand(vector float __a, vector float __b) {
+ return (vector float)(~((vector unsigned int)__a &
+ (vector unsigned int)__b));
+}
+
static __inline__ vector signed long long __ATTRS_o_ai
vec_nand(vector signed long long __a, vector signed long long __b) {
return ~(__a & __b);
@@ -4724,6 +6003,12 @@ vec_nand(vector bool long long __a, vector bool long long __b) {
return ~(__a & __b);
}
+static __inline__ vector double __ATTRS_o_ai
+vec_nand(vector double __a, vector double __b) {
+ return (vector double)(~((vector unsigned long long)__a &
+ (vector unsigned long long)__b));
+}
+
#endif
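Bitwise operators are not defined on floating-point vectors, so the float and double overloads of vec_nand (and of vec_orc below) cast through the same-width unsigned integer type to do the bit manipulation. A sketch:

#include <altivec.h>

/* vec_nand of a value with itself complements its bit pattern. */
vector float bit_complement(vector float a) {
  return vec_nand(a, a);
}
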
/* vec_nmadd */
@@ -5195,6 +6480,16 @@ static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
return __a | ~__b;
}
+static __inline__ vector float __ATTRS_o_ai
+vec_orc(vector bool int __a, vector float __b) {
+ return (vector float)(__a | ~(vector unsigned int)__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_orc(vector float __a, vector bool int __b) {
+ return (vector float)((vector unsigned int)__a | ~__b);
+}
+
static __inline__ vector signed long long __ATTRS_o_ai
vec_orc(vector signed long long __a, vector signed long long __b) {
return __a | ~__b;
@@ -5229,6 +6524,16 @@ static __inline__ vector bool long long __ATTRS_o_ai
vec_orc(vector bool long long __a, vector bool long long __b) {
return __a | ~__b;
}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_orc(vector double __a, vector bool long long __b) {
+ return (vector double)((vector unsigned long long)__a | ~__b);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_orc(vector bool long long __a, vector double __b) {
+ return (vector double)(__a | ~(vector unsigned long long)__b);
+}
#endif
/* vec_vor */
@@ -5536,8 +6841,25 @@ vec_pack(vector bool long long __a, vector bool long long __b) {
#endif
}
+static __inline__ vector float __ATTRS_o_ai
+vec_pack(vector double __a, vector double __b) {
+ return (vector float)(__a[0], __a[1], __b[0], __b[1]);
+}
#endif
+#ifdef __POWER9_VECTOR__
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_pack_to_short_fp32(vector float __a, vector float __b) {
+ vector float __resa = __builtin_vsx_xvcvsphp(__a);
+ vector float __resb = __builtin_vsx_xvcvsphp(__b);
+#ifdef __LITTLE_ENDIAN__
+ return (vector unsigned short)vec_mergee(__resa, __resb);
+#else
+ return (vector unsigned short)vec_mergeo(__resa, __resb);
+#endif
+}
+
+#endif
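vec_pack_to_short_fp32 converts each of the eight input floats to half precision with xvcvsphp and then gathers the halfword results into one vector, selecting even or odd halfwords according to endianness. A sketch (helper name illustrative), assuming a POWER9 target:

#include <altivec.h>

vector unsigned short to_fp16(vector float lo, vector float hi) {
  /* Eight floats in, eight binary16 values (as unsigned shorts) out. */
  return vec_pack_to_short_fp32(lo, hi);
}
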
/* vec_vpkuhum */
#define __builtin_altivec_vpkuhum vec_vpkuhum
@@ -6324,6 +7646,34 @@ vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
}
#endif
+/* vec_rlmi */
+#ifdef __POWER9_VECTOR__
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_rlmi(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vrlwmi(__a, __c, __b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_rlmi(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return __builtin_altivec_vrldmi(__a, __c, __b);
+}
+
+/* vec_rlnm */
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_rlnm(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return __builtin_altivec_vrlwnm(__a, __b) & __c;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_rlnm(vector unsigned long long __a, vector unsigned long long __b,
+ vector unsigned long long __c) {
+ return __builtin_altivec_vrldnm(__a, __b) & __c;
+}
+#endif
+
/* vec_vrlb */
static __inline__ vector signed char __ATTRS_o_ai
@@ -6984,6 +8334,145 @@ static __inline__ vector float __ATTRS_o_ai vec_sld(vector float __a,
#endif
}
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_sld(vector bool long long __a, vector bool long long __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sld(vector signed long long __a, vector signed long long __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sld(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_sld(vector double __a,
+ vector double __b,
+ unsigned const int __c) {
+ unsigned char __d = __c & 0x0F;
+#ifdef __LITTLE_ENDIAN__
+ return vec_perm(
+ __b, __a, (vector unsigned char)(16 - __d, 17 - __d, 18 - __d, 19 - __d,
+ 20 - __d, 21 - __d, 22 - __d, 23 - __d,
+ 24 - __d, 25 - __d, 26 - __d, 27 - __d,
+ 28 - __d, 29 - __d, 30 - __d, 31 - __d));
+#else
+ return vec_perm(
+ __a, __b,
+ (vector unsigned char)(__d, __d + 1, __d + 2, __d + 3, __d + 4, __d + 5,
+ __d + 6, __d + 7, __d + 8, __d + 9, __d + 10,
+ __d + 11, __d + 12, __d + 13, __d + 14, __d + 15));
+#endif
+}
+#endif
+
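These overloads extend the byte-granular shift-left-double to doubleword and double element types; __c is masked to the 0-15 byte range, and the little-endian path swaps the operands inside vec_perm to preserve the big-endian shift semantics. A sketch (helper name illustrative):

#include <altivec.h>

vector double take_middle(vector double a, vector double b) {
  /* Concatenate a and b (big-endian element order) and shift left by
     8 bytes: the result is {a[1], b[0]}. */
  return vec_sld(a, b, 8);
}
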
+/* vec_sldw */
+static __inline__ vector signed char __ATTRS_o_ai vec_sldw(
+ vector signed char __a, vector signed char __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_sldw(vector unsigned char __a, vector unsigned char __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector signed short __ATTRS_o_ai vec_sldw(
+ vector signed short __a, vector signed short __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_sldw(vector unsigned short __a, vector unsigned short __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sldw(vector signed int __a, vector signed int __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw(
+ vector unsigned int __a, vector unsigned int __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sldw(vector signed long long __a, vector signed long long __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sldw(vector unsigned long long __a, vector unsigned long long __b,
+ unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+#endif
+
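vec_sldw is the word-granular form: it scales the immediate by four and defers to vec_sld, so the effective shift is __c words modulo four. A sketch (helper name illustrative):

#include <altivec.h>

vector signed int shift_one_word(vector signed int a, vector signed int b) {
  /* {a0,a1,a2,a3}, {b0,b1,b2,b3} -> {a1, a2, a3, b0} */
  return vec_sldw(a, b, 1);
}
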
+#ifdef __POWER9_VECTOR__
+/* vec_slv */
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_slv(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vslv(__a, __b);
+}
+
+/* vec_srv */
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_srv(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vsrv(__a, __b);
+}
+#endif
+
/* vec_vsldoi */
static __inline__ vector signed char __ATTRS_o_ai
@@ -7307,6 +8796,20 @@ vec_sll(vector bool int __a, vector unsigned int __b) {
(vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sll(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sll(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsl((vector int)__a,
+ (vector int)__b);
+}
+#endif
+
/* vec_vsl */
static __inline__ vector signed char __ATTRS_o_ai
@@ -7570,6 +9073,32 @@ static __inline__ vector float __ATTRS_o_ai vec_slo(vector float __a,
return (vector float)__builtin_altivec_vslo((vector int)__a, (vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_slo(vector signed long long __a, vector signed char __b) {
+ return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_slo(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_slo(vector unsigned long long __a, vector signed char __b) {
+ return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_slo(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vslo((vector int)__a,
+ (vector int)__b);
+}
+#endif
+
/* vec_vslo */
static __inline__ vector signed char __ATTRS_o_ai
@@ -8304,6 +9833,20 @@ vec_srl(vector bool int __a, vector unsigned int __b) {
(vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_srl(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_srl(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsr((vector int)__a,
+ (vector int)__b);
+}
+#endif
+
/* vec_vsr */
static __inline__ vector signed char __ATTRS_o_ai
@@ -8567,6 +10110,32 @@ static __inline__ vector float __ATTRS_o_ai vec_sro(vector float __a,
return (vector float)__builtin_altivec_vsro((vector int)__a, (vector int)__b);
}
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sro(vector signed long long __a, vector signed char __b) {
+ return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sro(vector signed long long __a, vector unsigned char __b) {
+ return (vector signed long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sro(vector unsigned long long __a, vector signed char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sro(vector unsigned long long __a, vector unsigned char __b) {
+ return (vector unsigned long long)__builtin_altivec_vsro((vector int)__a,
+ (vector int)__b);
+}
+#endif
+
/* vec_vsro */
static __inline__ vector signed char __ATTRS_o_ai
@@ -9580,6 +11149,12 @@ vec_vsubfp(vector float __a, vector float __b) {
/* vec_subc */
+static __inline__ vector signed int __ATTRS_o_ai
+vec_subc(vector signed int __a, vector signed int __b) {
+ return (vector signed int)__builtin_altivec_vsubcuw((vector unsigned int)__a,
+ (vector unsigned int) __b);
+}
+
static __inline__ vector unsigned int __ATTRS_o_ai
vec_subc(vector unsigned int __a, vector unsigned int __b) {
return __builtin_altivec_vsubcuw(__a, __b);
@@ -9813,6 +11388,7 @@ vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
/* vec_vsubeuqm */
+
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
@@ -9825,6 +11401,18 @@ vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
return __builtin_altivec_vsubeuqm(__a, __b, __c);
}
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_sube(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubeuqm(__a, __b, __c);
+}
+
/* vec_vsubcuq */
static __inline__ vector signed __int128 __ATTRS_o_ai
@@ -9850,8 +11438,47 @@ vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
return __builtin_altivec_vsubecuq(__a, __b, __c);
}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_subec(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ return vec_addec(__a, ~__b, __c);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_subec(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ return vec_addec(__a, ~__b, __c);
+}
+
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_subec(vector signed __int128 __a, vector signed __int128 __b,
+ vector signed __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
+ vector unsigned __int128 __c) {
+ return __builtin_altivec_vsubecuq(__a, __b, __c);
+}
#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed int __ATTRS_o_ai
+vec_sube(vector signed int __a, vector signed int __b,
+ vector signed int __c) {
+ vector signed int __mask = {1, 1, 1, 1};
+ vector signed int __carry = __c & __mask;
+ return vec_adde(__a, ~__b, __carry);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_sube(vector unsigned int __a, vector unsigned int __b,
+ vector unsigned int __c) {
+ vector unsigned int __mask = {1, 1, 1, 1};
+ vector unsigned int __carry = __c & __mask;
+ return vec_adde(__a, ~__b, __carry);
+}
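The word-element vec_sube/vec_subec rely on the two's-complement identity a - b = a + ~b + 1: the carry input is masked to its low bit and routed through vec_adde/vec_addec with the complemented subtrahend. A sketch of chaining a borrow across two 32-bit limbs (helper name illustrative):

#include <altivec.h>

/* vec_subc yields 1 per element when the subtraction does not borrow. */
void sub_wide(vector unsigned int a_lo, vector unsigned int b_lo,
              vector unsigned int a_hi, vector unsigned int b_hi,
              vector unsigned int *lo, vector unsigned int *hi) {
  vector unsigned int carry = vec_subc(a_lo, b_lo);
  *lo = vec_sub(a_lo, b_lo);
  *hi = vec_sube(a_hi, b_hi, carry);
}
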
/* vec_sum4s */
static __inline__ vector int __ATTRS_o_ai vec_sum4s(vector signed char __a,
@@ -10051,6 +11678,11 @@ vec_unpackh(vector bool int __a) {
return (vector bool long long)__builtin_altivec_vupkhsw((vector int)__a);
#endif
}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_unpackh(vector float __a) {
+ return (vector double)(__a[0], __a[1]);
+}
#endif
/* vec_vupkhsb */
@@ -10185,6 +11817,11 @@ vec_unpackl(vector bool int __a) {
return (vector bool long long)__builtin_altivec_vupklsw((vector int)__a);
#endif
}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_unpackl(vector float __a) {
+ return (vector double)(__a[2], __a[3]);
+}
#endif
/* vec_vupklsb */
@@ -10935,6 +12572,55 @@ static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
return __a[__b];
}
+#ifdef __POWER9_VECTOR__
+
+/* vec_extract_exp */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_extract_exp(vector float __a) {
+ return __builtin_vsx_xvxexpsp(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extract_exp(vector double __a) {
+ return __builtin_vsx_xvxexpdp(__a);
+}
+
+/* vec_extract_sig */
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_extract_sig(vector float __a) {
+ return __builtin_vsx_xvxsigsp(__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_extract_sig(vector double __a) {
+ return __builtin_vsx_xvxsigdp(__a);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_extract_fp32_from_shorth(vector unsigned short __a) {
+ vector unsigned short __b =
+#ifdef __LITTLE_ENDIAN__
+ __builtin_shufflevector(__a, __a, 0, -1, 1, -1, 2, -1, 3, -1);
+#else
+ __builtin_shufflevector(__a, __a, -1, 0, -1, 1, -1, 2, -1, 3);
+#endif
+ return __builtin_vsx_xvcvhpsp(__b);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_extract_fp32_from_shortl(vector unsigned short __a) {
+ vector unsigned short __b =
+#ifdef __LITTLE_ENDIAN__
+ __builtin_shufflevector(__a, __a, 4, -1, 5, -1, 6, -1, 7, -1);
+#else
+ __builtin_shufflevector(__a, __a, -1, 4, -1, 5, -1, 6, -1, 7);
+#endif
+ return __builtin_vsx_xvcvhpsp(__b);
+}
+#endif /* __POWER9_VECTOR__ */
+
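vec_extract_exp and vec_extract_sig return the biased exponent and the significand bits of each element, which together with the sign recover the IEEE-754 encoding. A sketch (helper name illustrative), assuming a POWER9 target:

#include <altivec.h>

void decompose(vector double d, vector unsigned long long *exp,
               vector unsigned long long *sig) {
  *exp = vec_extract_exp(d); /* biased exponent (bias 1023 for double) */
  *sig = vec_extract_sig(d); /* significand bits */
}
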
/* vec_insert */
static __inline__ vector signed char __ATTRS_o_ai
@@ -14369,6 +16055,24 @@ __builtin_crypto_vncipherlast(vector unsigned long long __a,
#endif
#ifdef __POWER8_VECTOR__
+static __inline__ vector bool char __ATTRS_o_ai
+vec_permxor(vector bool char __a, vector bool char __b,
+ vector bool char __c) {
+ return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_permxor(vector signed char __a, vector signed char __b,
+ vector signed char __c) {
+ return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_permxor(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+}
+
static __inline__ vector unsigned char __ATTRS_o_ai
__builtin_crypto_vpermxor(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
@@ -14453,6 +16157,572 @@ vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
#endif
#endif
+
+/* vec_reve */
+
+static inline __ATTRS_o_ai vector bool char vec_reve(vector bool char __a) {
+ return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed char vec_reve(vector signed char __a) {
+ return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_reve(vector unsigned char __a) {
+ return __builtin_shufflevector(__a, __a, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
+ 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector bool int vec_reve(vector bool int __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed int vec_reve(vector signed int __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned int
+vec_reve(vector unsigned int __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector bool short vec_reve(vector bool short __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed short
+vec_reve(vector signed short __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_reve(vector unsigned short __a) {
+ return __builtin_shufflevector(__a, __a, 7, 6, 5, 4, 3, 2, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector float vec_reve(vector float __a) {
+ return __builtin_shufflevector(__a, __a, 3, 2, 1, 0);
+}
+
+#ifdef __VSX__
+static inline __ATTRS_o_ai vector bool long long
+vec_reve(vector bool long long __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector signed long long
+vec_reve(vector signed long long __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_reve(vector unsigned long long __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0);
+}
+
+static inline __ATTRS_o_ai vector double vec_reve(vector double __a) {
+ return __builtin_shufflevector(__a, __a, 1, 0);
+}
+#endif
+
+/* vec_revb */
+static __inline__ vector bool char __ATTRS_o_ai
+vec_revb(vector bool char __a) {
+ return __a;
+}
+
+static __inline__ vector signed char __ATTRS_o_ai
+vec_revb(vector signed char __a) {
+ return __a;
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_revb(vector unsigned char __a) {
+ return __a;
+}
+
+static __inline__ vector bool short __ATTRS_o_ai
+vec_revb(vector bool short __a) {
+ vector unsigned char __indices =
+ { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_revb(vector signed short __a) {
+ vector unsigned char __indices =
+ { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_revb(vector unsigned short __a) {
+ vector unsigned char __indices =
+ { 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector bool int __ATTRS_o_ai
+vec_revb(vector bool int __a) {
+ vector unsigned char __indices =
+ { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_revb(vector signed int __a) {
+ vector unsigned char __indices =
+ { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_revb(vector unsigned int __a) {
+ vector unsigned char __indices =
+ { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_revb(vector float __a) {
+ vector unsigned char __indices =
+ { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
+ return vec_perm(__a, __a, __indices);
+}
+
+#ifdef __VSX__
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_revb(vector bool long long __a) {
+ vector unsigned char __indices =
+ { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_revb(vector signed long long __a) {
+ vector unsigned char __indices =
+ { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_revb(vector unsigned long long __a) {
+ vector unsigned char __indices =
+ { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+ return vec_perm(__a, __a, __indices);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_revb(vector double __a) {
+ vector unsigned char __indices =
+ { 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8 };
+ return vec_perm(__a, __a, __indices);
+}
+#endif /* End __VSX__ */
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_revb(vector signed __int128 __a) {
+ vector unsigned char __indices =
+ { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
+ return (vector signed __int128)vec_perm((vector signed int)__a,
+ (vector signed int)__a,
+ __indices);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_revb(vector unsigned __int128 __a) {
+ vector unsigned char __indices =
+ { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 };
+ return (vector unsigned __int128)vec_perm((vector signed int)__a,
+ (vector signed int)__a,
+ __indices);
+}
+#endif /* END __POWER8_VECTOR__ && __powerpc64__ */
+
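vec_reve reverses whole elements, while vec_revb reverses the bytes within each element (a no-op for byte-sized elements); composing the two reverses all sixteen bytes of the register. A sketch (helper names illustrative):

#include <altivec.h>

vector unsigned int swap_bytes(vector unsigned int v) {
  /* 0x00112233 in each element -> 0x33221100 */
  return vec_revb(v);
}

vector unsigned int reverse_words(vector unsigned int v) {
  /* {w0, w1, w2, w3} -> {w3, w2, w1, w0} */
  return vec_reve(v);
}
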
+/* vec_xl */
+
+static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
+ signed char *__ptr) {
+ return *(vector signed char *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned char
+vec_xl(signed long long __offset, unsigned char *__ptr) {
+ return *(vector unsigned char *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
+ signed short *__ptr) {
+ return *(vector signed short *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned short
+vec_xl(signed long long __offset, unsigned short *__ptr) {
+ return *(vector unsigned short *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
+ signed int *__ptr) {
+ return *(vector signed int *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
+ unsigned int *__ptr) {
+ return *(vector unsigned int *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
+ float *__ptr) {
+ return *(vector float *)(__ptr + __offset);
+}
+
+#ifdef __VSX__
+static inline __ATTRS_o_ai vector signed long long
+vec_xl(signed long long __offset, signed long long *__ptr) {
+ return *(vector signed long long *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned long long
+vec_xl(signed long long __offset, unsigned long long *__ptr) {
+ return *(vector unsigned long long *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
+ double *__ptr) {
+ return *(vector double *)(__ptr + __offset);
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static inline __ATTRS_o_ai vector signed __int128
+vec_xl(signed long long __offset, signed __int128 *__ptr) {
+ return *(vector signed __int128 *)(__ptr + __offset);
+}
+
+static inline __ATTRS_o_ai vector unsigned __int128
+vec_xl(signed long long __offset, unsigned __int128 *__ptr) {
+ return *(vector unsigned __int128 *)(__ptr + __offset);
+}
+#endif
+
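vec_xl and vec_xst (below) perform vector loads and stores with no alignment requirement; note that, as written here, the offset takes part in ordinary pointer arithmetic and is therefore scaled by the pointee type rather than being a raw byte count. A sketch (helper name illustrative):

#include <altivec.h>

/* Load four ints starting at elems[i], regardless of alignment. */
vector signed int load_at(signed int *elems, signed long long i) {
  return vec_xl(i, elems);
}
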
+/* vec_xl_be */
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ vector signed char __ATTRS_o_ai
+vec_xl_be(signed long long __offset, signed char *__ptr) {
+ vector signed char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+ return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
+ 13, 12, 11, 10, 9, 8);
+}
+
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_xl_be(signed long long __offset, unsigned char *__ptr) {
+ vector unsigned char __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+ return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
+ 13, 12, 11, 10, 9, 8);
+}
+
+static __inline__ vector signed short __ATTRS_o_ai
+vec_xl_be(signed long long __offset, signed short *__ptr) {
+ vector signed short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+ return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
+}
+
+static __inline__ vector unsigned short __ATTRS_o_ai
+vec_xl_be(signed long long __offset, unsigned short *__ptr) {
+ vector unsigned short __vec = __builtin_vsx_lxvd2x_be(__offset, __ptr);
+ return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_xl_be(signed long long __offset, signed int *__ptr) {
+ return (vector signed int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_xl_be(signed long long __offset, unsigned int *__ptr) {
+ return (vector unsigned int)__builtin_vsx_lxvw4x_be(__offset, __ptr);
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_xl_be(signed long long __offset, float *__ptr) {
+ return (vector float)__builtin_vsx_lxvw4x_be(__offset, __ptr);
+}
+
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xl_be(signed long long __offset, signed long long *__ptr) {
+ return (vector signed long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xl_be(signed long long __offset, unsigned long long *__ptr) {
+ return (vector unsigned long long)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+}
+
+static __inline__ vector double __ATTRS_o_ai
+vec_xl_be(signed long long __offset, double *__ptr) {
+ return (vector double)__builtin_vsx_lxvd2x_be(__offset, __ptr);
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ vector signed __int128 __ATTRS_o_ai
+vec_xl_be(signed long long __offset, signed __int128 *__ptr) {
+ return vec_xl(__offset, __ptr);
+}
+
+static __inline__ vector unsigned __int128 __ATTRS_o_ai
+vec_xl_be(signed long long __offset, unsigned __int128 *__ptr) {
+ return vec_xl(__offset, __ptr);
+}
+#endif
+#else
+ #define vec_xl_be vec_xl
+#endif
+
+/* vec_xst */
+
+static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
+ signed long long __offset,
+ signed char *__ptr) {
+ *(vector signed char *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
+ signed long long __offset,
+ unsigned char *__ptr) {
+ *(vector unsigned char *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
+ signed long long __offset,
+ signed short *__ptr) {
+ *(vector signed short *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
+ signed long long __offset,
+ unsigned short *__ptr) {
+ *(vector unsigned short *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
+ signed long long __offset,
+ signed int *__ptr) {
+ *(vector signed int *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
+ signed long long __offset,
+ unsigned int *__ptr) {
+ *(vector unsigned int *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector float __vec,
+ signed long long __offset,
+ float *__ptr) {
+ *(vector float *)(__ptr + __offset) = __vec;
+}
+
+#ifdef __VSX__
+static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
+ signed long long __offset,
+ signed long long *__ptr) {
+ *(vector signed long long *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
+ signed long long __offset,
+ unsigned long long *__ptr) {
+ *(vector unsigned long long *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector double __vec,
+ signed long long __offset,
+ double *__ptr) {
+ *(vector double *)(__ptr + __offset) = __vec;
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
+ signed long long __offset,
+ signed __int128 *__ptr) {
+ *(vector signed __int128 *)(__ptr + __offset) = __vec;
+}
+
+static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
+ signed long long __offset,
+ unsigned __int128 *__ptr) {
+ *(vector unsigned __int128 *)(__ptr + __offset) = __vec;
+}
+#endif
+
+/* vec_xst_be */
+
+#ifdef __LITTLE_ENDIAN__
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed char __vec,
+ signed long long __offset,
+ signed char *__ptr) {
+ vector signed char __tmp =
+ __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
+ 13, 12, 11, 10, 9, 8);
+ __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned char __vec,
+ signed long long __offset,
+ unsigned char *__ptr) {
+ vector unsigned char __tmp =
+ __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
+ 13, 12, 11, 10, 9, 8);
+ __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed short __vec,
+ signed long long __offset,
+ signed short *__ptr) {
+ vector signed short __tmp =
+ __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
+ __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned short __vec,
+ signed long long __offset,
+ unsigned short *__ptr) {
+ vector unsigned short __tmp =
+ __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
+ __builtin_vsx_stxvd2x_be(__tmp, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed int __vec,
+ signed long long __offset,
+ signed int *__ptr) {
+ __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned int __vec,
+ signed long long __offset,
+ unsigned int *__ptr) {
+ __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector float __vec,
+ signed long long __offset,
+ float *__ptr) {
+ __builtin_vsx_stxvw4x_be(__vec, __offset, __ptr);
+}
+
+#ifdef __VSX__
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed long long __vec,
+ signed long long __offset,
+ signed long long *__ptr) {
+ __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned long long __vec,
+ signed long long __offset,
+ unsigned long long *__ptr) {
+ __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
+ signed long long __offset,
+ double *__ptr) {
+ __builtin_vsx_stxvd2x_be(__vec, __offset, __ptr);
+}
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec,
+ signed long long __offset,
+ signed __int128 *__ptr) {
+ vec_xst(__vec, __offset, __ptr);
+}
+
+static __inline__ void __ATTRS_o_ai vec_xst_be(vector unsigned __int128 __vec,
+ signed long long __offset,
+ unsigned __int128 *__ptr) {
+ vec_xst(__vec, __offset, __ptr);
+}
+#endif
+#else
+ #define vec_xst_be vec_xst
+#endif
+
+#ifdef __POWER9_VECTOR__
+#define vec_test_data_class(__a, __b) \
+ _Generic((__a), \
+ vector float: \
+ (vector bool int)__builtin_vsx_xvtstdcsp((__a), (__b)), \
+ vector double: \
+ (vector bool long long)__builtin_vsx_xvtstdcdp((__a), (__b)) \
+ )
+
+#endif /* #ifdef __POWER9_VECTOR__ */
+
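vec_test_data_class dispatches through _Generic to the single- or double-precision test instruction; the second operand is the DCMX mask selecting which IEEE classes to test. A sketch, assuming the usual DCMX encoding in which 0x40 selects NaN:

#include <altivec.h>

vector bool long long is_nan(vector double d) {
  /* 0x40 is assumed here to be the NaN bit of the DCMX mask. */
  return vec_test_data_class(d, 0x40);
}
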
+static vector float __ATTRS_o_ai vec_neg(vector float __a) {
+ return -__a;
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_neg(vector double __a) {
+ return -__a;
+}
+
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector long long __ATTRS_o_ai vec_neg(vector long long __a) {
+ return -__a;
+}
+#endif
+
+static vector signed int __ATTRS_o_ai vec_neg(vector signed int __a) {
+ return -__a;
+}
+
+static vector signed short __ATTRS_o_ai vec_neg(vector signed short __a) {
+ return -__a;
+}
+
+static vector signed char __ATTRS_o_ai vec_neg(vector signed char __a) {
+ return -__a;
+}
+
+static vector float __ATTRS_o_ai vec_nabs(vector float __a) {
+ return -vec_abs(__a);
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_nabs(vector double __a) {
+ return -vec_abs(__a);
+}
+
+#endif
+
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) {
+ return __builtin_altivec_vminsd(__a, -__a);
+}
+#endif
+
+static vector signed int __ATTRS_o_ai vec_nabs(vector signed int __a) {
+ return __builtin_altivec_vminsw(__a, -__a);
+}
+
+static vector signed short __ATTRS_o_ai vec_nabs(vector signed short __a) {
+ return __builtin_altivec_vminsh(__a, -__a);
+}
+
+static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
+ return __builtin_altivec_vminsb(__a, -__a);
+}
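For signed integers, vec_nabs computes -|a| as min(a, -a); unlike negating an absolute value, this stays well defined for the most negative element, where both operands equal INT_MIN and the minimum is INT_MIN itself. A sketch (helper name illustrative):

#include <altivec.h>

vector signed int negative_abs(vector signed int a) {
  /* {3, -5, INT_MIN, 0} -> {-3, -5, INT_MIN, 0} */
  return vec_nabs(a);
}
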
#undef __ATTRS_o_ai
#endif /* __ALTIVEC_H */
diff --git a/lib/Headers/ammintrin.h b/lib/Headers/ammintrin.h
index 8985bb404f47..2843a7a2677f 100644
--- a/lib/Headers/ammintrin.h
+++ b/lib/Headers/ammintrin.h
@@ -30,7 +30,7 @@
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4a")))
/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
-/// integer vector operand at the index idx and of the length len.
+/// integer vector operand at the index \a idx and of the length \a len.
///
/// \headerfile <x86intrin.h>
///
@@ -38,7 +38,7 @@
/// __m128i _mm_extracti_si64(__m128i x, const int len, const int idx);
/// \endcode
///
-/// This intrinsic corresponds to the \c EXTRQ instruction.
+/// This intrinsic corresponds to the <c> EXTRQ </c> instruction.
///
/// \param x
/// The value from which bits are extracted.
@@ -49,8 +49,8 @@
/// Bits [5:0] specify the index of the least significant bit; the other
/// bits are ignored. If the sum of the index and length is greater than 64,
/// the result is undefined. If the length and index are both zero, bits
-/// [63:0] of parameter x are extracted. If the length is zero but the index
-/// is non-zero, the result is undefined.
+/// [63:0] of parameter \a x are extracted. If the length is zero but the
+/// index is non-zero, the result is undefined.
/// \returns A 128-bit integer vector whose lower 64 bits contain the bits
/// extracted from the source operand.
#define _mm_extracti_si64(x, len, idx) \
@@ -58,11 +58,12 @@
(char)(len), (char)(idx)))
/// \brief Extracts the specified bits from the lower 64 bits of the 128-bit
-/// integer vector operand at the index and of the length specified by __y.
+/// integer vector operand at the index and of the length specified by
+/// \a __y.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c EXTRQ instruction.
+/// This intrinsic corresponds to the <c> EXTRQ </c> instruction.
///
/// \param __x
/// The value from which bits are extracted.
@@ -71,8 +72,8 @@
/// length at [5:0]; all other bits are ignored. If bits [5:0] are zero, the
/// length is interpreted as 64. If the sum of the index and length is
/// greater than 64, the result is undefined. If the length and index are
-/// both zero, bits [63:0] of parameter __x are extracted. If the length is
-/// zero but the index is non-zero, the result is undefined.
+/// both zero, bits [63:0] of parameter \a __x are extracted. If the length
+/// is zero but the index is non-zero, the result is undefined.
/// \returns A 128-bit vector whose lower 64 bits contain the bits extracted
/// from the source operand.
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -81,9 +82,9 @@ _mm_extract_si64(__m128i __x, __m128i __y)
return (__m128i)__builtin_ia32_extrq((__v2di)__x, (__v16qi)__y);
}
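A minimal usage sketch of the immediate form, assuming an SSE4a-capable x86 target (the helper name is illustrative):

#include <x86intrin.h>

__m128i extract_field(__m128i x) {
  /* len = 8, idx = 4: bits [11:4] of the low quadword, placed in the
     low bits of the result; the upper 64 bits are undefined. */
  return _mm_extracti_si64(x, 8, 4);
}
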
-/// \brief Inserts bits of a specified length from the source integer vector y
-/// into the lower 64 bits of the destination integer vector x at the index
-/// idx and of the length len.
+/// \brief Inserts bits of a specified length from the source integer vector
+/// \a y into the lower 64 bits of the destination integer vector \a x at
+/// the index \a idx and of the length \a len.
///
/// \headerfile <x86intrin.h>
///
@@ -92,15 +93,15 @@ _mm_extract_si64(__m128i __x, __m128i __y)
/// const int idx);
/// \endcode
///
-/// This intrinsic corresponds to the \c INSERTQ instruction.
+/// This intrinsic corresponds to the <c> INSERTQ </c> instruction.
///
/// \param x
/// The destination operand where bits will be inserted. The inserted bits
-/// are defined by the length len and by the index idx specifying the least
-/// significant bit.
+/// are defined by the length \a len and by the index \a idx specifying the
+/// least significant bit.
/// \param y
/// The source operand containing the bits to be extracted. The extracted
-/// bits are the least significant bits of operand y of length len.
+/// bits are the least significant bits of operand \a y of length \a len.
/// \param len
/// Bits [5:0] specify the length; the other bits are ignored. If bits [5:0]
/// are zero, the length is interpreted as 64.
@@ -108,45 +109,43 @@ _mm_extract_si64(__m128i __x, __m128i __y)
/// Bits [5:0] specify the index of the least significant bit; the other
/// bits are ignored. If the sum of the index and length is greater than 64,
/// the result is undefined. If the length and index are both zero, bits
-/// [63:0] of parameter y are inserted into parameter x. If the length is
-/// zero but the index is non-zero, the result is undefined.
+/// [63:0] of parameter \a y are inserted into parameter \a x. If the length
+/// is zero but the index is non-zero, the result is undefined.
/// \returns A 128-bit integer vector containing the original lower 64-bits of
-/// destination operand x with the specified bitfields replaced by the lower
-/// bits of source operand y. The upper 64 bits of the return value are
-/// undefined.
-
+/// destination operand \a x with the specified bitfields replaced by the
+/// lower bits of source operand \a y. The upper 64 bits of the return value
+/// are undefined.
#define _mm_inserti_si64(x, y, len, idx) \
((__m128i)__builtin_ia32_insertqi((__v2di)(__m128i)(x), \
(__v2di)(__m128i)(y), \
(char)(len), (char)(idx)))
/// \brief Inserts bits of a specified length from the source integer vector
-/// __y into the lower 64 bits of the destination integer vector __x at the
-/// index and of the length specified by __y.
+/// \a __y into the lower 64 bits of the destination integer vector \a __x
+/// at the index and of the length specified by \a __y.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c INSERTQ instruction.
+/// This intrinsic corresponds to the <c> INSERTQ </c> instruction.
///
/// \param __x
/// The destination operand where bits will be inserted. The inserted bits
/// are defined by the length and by the index of the least significant bit
-/// specified by operand __y.
+/// specified by operand \a __y.
/// \param __y
/// The source operand containing the bits to be extracted. The extracted
-/// bits are the least significant bits of operand __y with length specified
-/// by bits [69:64]. These are inserted into the destination at the index
-/// specified by bits [77:72]; all other bits are ignored. If bits [69:64]
-/// are zero, the length is interpreted as 64. If the sum of the index and
-/// length is greater than 64, the result is undefined. If the length and
-/// index are both zero, bits [63:0] of parameter __y are inserted into
-/// parameter __x. If the length is zero but the index is non-zero, the
-/// result is undefined.
+/// bits are the least significant bits of operand \a __y with length
+/// specified by bits [69:64]. These are inserted into the destination at the
+/// index specified by bits [77:72]; all other bits are ignored. If bits
+/// [69:64] are zero, the length is interpreted as 64. If the sum of the
+/// index and length is greater than 64, the result is undefined. If the
+/// length and index are both zero, bits [63:0] of parameter \a __y are
+/// inserted into parameter \a __x. If the length is zero but the index is
+/// non-zero, the result is undefined.
/// \returns A 128-bit integer vector containing the original lower 64-bits of
-/// destination operand __x with the specified bitfields replaced by the
-/// lower bits of source operand __y. The upper 64 bits of the return value
-/// are undefined.
-
+/// destination operand \a __x with the specified bitfields replaced by the
+/// lower bits of source operand \a __y. The upper 64 bits of the return
+/// value are undefined.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_insert_si64(__m128i __x, __m128i __y)
{
@@ -159,7 +158,7 @@ _mm_insert_si64(__m128i __x, __m128i __y)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c MOVNTSD instruction.
+/// This intrinsic corresponds to the <c> MOVNTSD </c> instruction.
///
/// \param __p
/// The 64-bit memory location used to store the register value.
@@ -177,7 +176,7 @@ _mm_stream_sd(double *__p, __m128d __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c MOVNTSS instruction.
+/// This intrinsic corresponds to the <c> MOVNTSS </c> instruction.
///
/// \param __p
/// The 32-bit memory location used to store the register value.
diff --git a/lib/Headers/armintr.h b/lib/Headers/armintr.h
new file mode 100644
index 000000000000..933afcbb91b6
--- /dev/null
+++ b/lib/Headers/armintr.h
@@ -0,0 +1,45 @@
+/*===---- armintr.h - ARM Windows intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* Only include this if we're compiling for the windows platform. */
+#ifndef _MSC_VER
+#include_next <armintr.h>
+#else
+
+#ifndef __ARMINTR_H
+#define __ARMINTR_H
+
+typedef enum
+{
+ _ARM_BARRIER_SY = 0xF,
+ _ARM_BARRIER_ST = 0xE,
+ _ARM_BARRIER_ISH = 0xB,
+ _ARM_BARRIER_ISHST = 0xA,
+ _ARM_BARRIER_NSH = 0x7,
+ _ARM_BARRIER_NSHST = 0x6,
+ _ARM_BARRIER_OSH = 0x3,
+ _ARM_BARRIER_OSHST = 0x2
+} _ARMINTR_BARRIER_TYPE;
+
+#endif /* __ARMINTR_H */
+#endif /* _MSC_VER */
diff --git a/lib/Headers/avx512bwintrin.h b/lib/Headers/avx512bwintrin.h
index d3c5a6c96446..629dc8611a7f 100644
--- a/lib/Headers/avx512bwintrin.h
+++ b/lib/Headers/avx512bwintrin.h
@@ -350,19 +350,17 @@ _mm512_add_epi8 (__m512i __A, __m512i __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_add_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __U);
+_mm512_mask_add_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_add_epi8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_add_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_paddb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __U);
+_mm512_maskz_add_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_add_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_qi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -371,19 +369,17 @@ _mm512_sub_epi8 (__m512i __A, __m512i __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sub_epi8 (__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __U);
+_mm512_mask_sub_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_sub_epi8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sub_epi8 (__mmask64 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_psubb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __U);
+_mm512_maskz_sub_epi8(__mmask64 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_sub_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_qi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -392,19 +388,17 @@ _mm512_add_epi16 (__m512i __A, __m512i __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_add_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+_mm512_mask_add_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_add_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_add_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_paddw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+_mm512_maskz_add_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_add_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -413,19 +407,17 @@ _mm512_sub_epi16 (__m512i __A, __m512i __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sub_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+_mm512_mask_sub_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sub_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sub_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_psubw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+_mm512_maskz_sub_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sub_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -434,19 +426,17 @@ _mm512_mullo_epi16 (__m512i __A, __m512i __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mullo_epi16 (__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+_mm512_mask_mullo_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mullo_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_mullo_epi16 (__mmask32 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_pmullw512_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) _mm512_setzero_hi(),
- (__mmask32) __U);
+_mm512_maskz_mullo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_mullo_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
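The rewritten forms make the masking explicit: the unmasked operation always runs, and a select builtin then chooses per element between its result and the pass-through operand (mask form) or zero (maskz form). A sketch of the resulting semantics (helper name illustrative):

#include <immintrin.h>

__m512i masked_add(__m512i passthru, __mmask32 m, __m512i a, __m512i b) {
  /* Element i is (a + b)[i] where bit i of m is set, else passthru[i]. */
  return _mm512_mask_add_epi16(passthru, m, a, b);
}
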
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1018,31 +1008,25 @@ _mm512_mask_min_epu16 (__m512i __W, __mmask32 __M, __m512i __A,
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_shuffle_epi8 (__m512i __A, __m512i __B)
+_mm512_shuffle_epi8(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) -1);
+ return (__m512i)__builtin_ia32_pshufb512((__v64qi)__A, (__v64qi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_shuffle_epi8 (__m512i __W, __mmask64 __U, __m512i __A,
- __m512i __B)
+_mm512_mask_shuffle_epi8(__m512i __W, __mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) __W,
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_shuffle_epi8(__A, __B),
+ (__v64qi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_shuffle_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
+_mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pshufb512_mask ((__v64qi) __A,
- (__v64qi) __B,
- (__v64qi) _mm512_setzero_qi(),
- (__mmask64) __U);
+ return (__m512i)__builtin_ia32_selectb_512((__mmask64)__U,
+ (__v64qi)_mm512_shuffle_epi8(__A, __B),
+ (__v64qi)_mm512_setzero_qi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1537,55 +1521,49 @@ _mm512_maskz_unpacklo_epi16(__mmask32 __U, __m512i __A, __m512i __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepi8_epi16 (__m256i __A)
+_mm512_cvtepi8_epi16(__m256i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ /* This function always performs a signed extension, but __v32qi is a vector
+ of plain char, whose signedness is implementation-defined, so cast through
+ the explicitly signed __v32qs. */
+ return (__m512i)__builtin_convertvector((__v32qs)__A, __v32hi);
}
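The __v32qs cast matters because plain char's signedness is target- and flag-dependent (-funsigned-char), and __builtin_convertvector extends according to the source element type. A small check of the intended behavior, assuming <immintrin.h> and -mavx512bw:

#include <immintrin.h>

/* Byte 0x80 must widen to the 16-bit value -128 (0xFF80), not 128, so the
   conversion has to sign-extend regardless of plain char's signedness. */
__m512i sign_extend_demo(void) {
  __m256i bytes = _mm256_set1_epi8((char)0x80);
  return _mm512_cvtepi8_epi16(bytes); /* every word lane == -128 */
}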
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepi8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+_mm512_mask_cvtepi8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_cvtepi8_epi16(__A),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepi8_epi16 (__mmask32 __U, __m256i __A)
+_mm512_maskz_cvtepi8_epi16(__mmask32 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbw512_mask ((__v32qi) __A,
- (__v32hi)
- _mm512_setzero_hi(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_cvtepi8_epi16(__A),
+ (__v32hi)_mm512_setzero_hi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepu8_epi16 (__m256i __A)
+_mm512_cvtepu8_epi16(__m256i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ return (__m512i)__builtin_convertvector((__v32qu)__A, __v32hi);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepu8_epi16 (__m512i __W, __mmask32 __U, __m256i __A)
+_mm512_mask_cvtepu8_epi16(__m512i __W, __mmask32 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_cvtepu8_epi16(__A),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepu8_epi16 (__mmask32 __U, __m256i __A)
+_mm512_maskz_cvtepu8_epi16(__mmask32 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbw512_mask ((__v32qi) __A,
- (__v32hi)
- _mm512_setzero_hi(),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_cvtepu8_epi16(__A),
+ (__v32hi)_mm512_setzero_hi());
}
@@ -1704,79 +1682,70 @@ _mm512_maskz_cvtepu8_epi16 (__mmask32 __U, __m256i __A)
(__v32hi)_mm512_setzero_hi()); })
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sllv_epi16 (__m512i __A, __m512i __B)
+_mm512_sllv_epi16(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psllv32hi((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sllv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+_mm512_mask_sllv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sllv_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sllv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+_mm512_maskz_sllv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psllv32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sllv_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sll_epi16 (__m512i __A, __m128i __B)
+_mm512_sll_epi16(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psllw512((__v32hi)__A, (__v8hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sll_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m128i __B)
+_mm512_mask_sll_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sll_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sll_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+_mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psllw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sll_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
-#define _mm512_slli_epi16(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
- (__v32hi)_mm512_setzero_hi(), \
- (__mmask32)-1); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_slli_epi16(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
+}
-#define _mm512_mask_slli_epi16(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
- (__v32hi)(__m512i)(W), \
- (__mmask32)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_slli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_slli_epi16(__A, __B),
+ (__v32hi)__W);
+}
-#define _mm512_maskz_slli_epi16(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psllwi512_mask((__v32hi)(__m512i)(A), (int)(B), \
- (__v32hi)_mm512_setzero_hi(), \
- (__mmask32)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_slli_epi16(__mmask32 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_slli_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
+}
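Converting the slli macros to inline functions gives real type checking and single evaluation of the arguments, and the new builtin signature takes the count as a plain int. As the sketch below assumes, the shift count therefore no longer needs to be a literal:

#include <immintrin.h>

/* Sketch, assuming -mavx512bw: the count may be a runtime value now that
   _mm512_slli_epi16 is a function rather than an immediate-only macro. */
__m512i shift_words(__m512i v, int n) {
  return _mm512_slli_epi16(v, n);
}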
#define _mm512_bslli_epi128(a, imm) __extension__ ({ \
(__m512i)__builtin_shufflevector( \
@@ -1848,155 +1817,136 @@ _mm512_maskz_sll_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
((char)(imm)&0xF0) ? 63 : ((char)(imm)>0xF ? 79 : 127) - (char)(imm)); })
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srlv_epi16 (__m512i __A, __m512i __B)
+_mm512_srlv_epi16(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psrlv32hi((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srlv_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+_mm512_mask_srlv_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srlv_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srlv_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+_mm512_maskz_srlv_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psrlv32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srlv_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srav_epi16 (__m512i __A, __m512i __B)
+_mm512_srav_epi16(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psrav32hi((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srav_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m512i __B)
+_mm512_mask_srav_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srav_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srav_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
+_mm512_maskz_srav_epi16(__mmask32 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psrav32hi_mask ((__v32hi) __A,
- (__v32hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srav_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sra_epi16 (__m512i __A, __m128i __B)
+_mm512_sra_epi16(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psraw512((__v32hi)__A, (__v8hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sra_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m128i __B)
+_mm512_mask_sra_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sra_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sra_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+_mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psraw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_sra_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
-#define _mm512_srai_epi16(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
- (__v32hi)_mm512_setzero_hi(), \
- (__mmask32)-1); })
-
-#define _mm512_mask_srai_epi16(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
- (__v32hi)(__m512i)(W), \
- (__mmask32)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srai_epi16(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
+}
-#define _mm512_maskz_srai_epi16(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrawi512_mask((__v32hi)(__m512i)(A), (int)(B), \
- (__v32hi)_mm512_setzero_hi(), \
- (__mmask32)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srai_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srai_epi16(__A, __B),
+ (__v32hi)__W);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srai_epi16(__mmask32 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srai_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
+}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srl_epi16 (__m512i __A, __m128i __B)
+_mm512_srl_epi16(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) -1);
+ return (__m512i)__builtin_ia32_psrlw512((__v32hi)__A, (__v8hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srl_epi16 (__m512i __W, __mmask32 __U, __m512i __A,
- __m128i __B)
+_mm512_mask_srl_epi16(__m512i __W, __mmask32 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi) __W,
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srl_epi16(__A, __B),
+ (__v32hi)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srl_epi16 (__mmask32 __U, __m512i __A, __m128i __B)
+_mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrlw512_mask ((__v32hi) __A,
- (__v8hi) __B,
- (__v32hi)
- _mm512_setzero_hi (),
- (__mmask32) __U);
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srl_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
}
-#define _mm512_srli_epi16(A, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
- (__v32hi)_mm512_setzero_hi(), \
- (__mmask32)-1); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srli_epi16(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
+}
-#define _mm512_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
- (__v32hi)(__m512i)(W), \
- (__mmask32)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srli_epi16(__m512i __W, __mmask32 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srli_epi16(__A, __B),
+ (__v32hi)__W);
+}
-#define _mm512_maskz_srli_epi16(U, A, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_psrlwi512_mask((__v32hi)(__m512i)(A), (int)(imm), \
- (__v32hi)_mm512_setzero_hi(), \
- (__mmask32)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
+ (__v32hi)_mm512_srli_epi16(__A, __B),
+ (__v32hi)_mm512_setzero_hi());
+}
#define _mm512_bsrli_epi128(a, imm) __extension__ ({ \
(__m512i)__builtin_shufflevector( \
diff --git a/lib/Headers/avx512dqintrin.h b/lib/Headers/avx512dqintrin.h
index 13665e4c6668..ae44b98a9495 100644
--- a/lib/Headers/avx512dqintrin.h
+++ b/lib/Headers/avx512dqintrin.h
@@ -37,204 +37,169 @@ _mm512_mullo_epi64 (__m512i __A, __m512i __B) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mullo_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+_mm512_mask_mullo_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_mullo_epi64(__A, __B),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_mullo_epi64 (__mmask8 __U, __m512i __A, __m512i __B) {
- return (__m512i) __builtin_ia32_pmullq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+_mm512_maskz_mullo_epi64(__mmask8 __U, __m512i __A, __m512i __B) {
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_mullo_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_xor_pd (__m512d __A, __m512d __B) {
- return (__m512d) ((__v8du) __A ^ (__v8du) __B);
+_mm512_xor_pd(__m512d __A, __m512d __B) {
+ return (__m512d)((__v8du)__A ^ (__v8du)__B);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask_xor_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U);
+_mm512_mask_xor_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_xor_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_maskz_xor_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_xorpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U);
+_mm512_maskz_xor_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_xor_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_xor_ps (__m512 __A, __m512 __B) {
- return (__m512) ((__v16su) __A ^ (__v16su) __B);
+ return (__m512)((__v16su)__A ^ (__v16su)__B);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask_xor_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U);
+_mm512_mask_xor_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_xor_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_maskz_xor_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_xorps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U);
+_mm512_maskz_xor_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_xor_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_or_pd (__m512d __A, __m512d __B) {
- return (__m512d) ((__v8du) __A | (__v8du) __B);
+_mm512_or_pd(__m512d __A, __m512d __B) {
+ return (__m512d)((__v8du)__A | (__v8du)__B);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask_or_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U);
+_mm512_mask_or_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_or_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_maskz_or_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_orpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U);
+_mm512_maskz_or_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_or_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_or_ps (__m512 __A, __m512 __B) {
- return (__m512) ((__v16su) __A | (__v16su) __B);
+_mm512_or_ps(__m512 __A, __m512 __B) {
+ return (__m512)((__v16su)__A | (__v16su)__B);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask_or_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U);
+_mm512_mask_or_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_or_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_maskz_or_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_orps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U);
+_mm512_maskz_or_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_or_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_and_pd (__m512d __A, __m512d __B) {
- return (__m512d) ((__v8du) __A & (__v8du) __B);
+_mm512_and_pd(__m512d __A, __m512d __B) {
+ return (__m512d)((__v8du)__A & (__v8du)__B);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask_and_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U);
+_mm512_mask_and_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_and_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_maskz_and_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_andpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U);
+_mm512_maskz_and_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_and_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_and_ps (__m512 __A, __m512 __B) {
- return (__m512) ((__v16su) __A & (__v16su) __B);
+_mm512_and_ps(__m512 __A, __m512 __B) {
+ return (__m512)((__v16su)__A & (__v16su)__B);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask_and_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U);
+_mm512_mask_and_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_and_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_maskz_and_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_andps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U);
+_mm512_maskz_and_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_and_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_andnot_pd (__m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) -1);
+_mm512_andnot_pd(__m512d __A, __m512d __B) {
+ return (__m512d)(~(__v8du)__A & (__v8du)__B);
}
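Note the operand order: andnot complements its first operand, i.e. ~A & B, matching VANDNPD. A classic use, sketched assuming -mavx512dq:

#include <immintrin.h>

/* Absolute value: clear the sign bit of every lane of x. */
__m512d vec_fabs(__m512d x) {
  __m512d signbit = _mm512_set1_pd(-0.0); /* only the sign bit set */
  return _mm512_andnot_pd(signbit, x);    /* ~signbit & x == |x| */
}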
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask_andnot_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U);
+_mm512_mask_andnot_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_andnot_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_maskz_andnot_pd (__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_andnpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U);
+_mm512_maskz_andnot_pd(__mmask8 __U, __m512d __A, __m512d __B) {
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_andnot_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_andnot_ps (__m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) -1);
+_mm512_andnot_ps(__m512 __A, __m512 __B) {
+ return (__m512)(~(__v16su)__A & (__v16su)__B);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask_andnot_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U);
+_mm512_mask_andnot_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_andnot_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_maskz_andnot_ps (__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_andnps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U);
+_mm512_maskz_andnot_ps(__mmask16 __U, __m512 __A, __m512 __B) {
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_andnot_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -1151,148 +1116,184 @@ _mm512_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
}
#define _mm512_extractf32x8_ps(A, imm) __extension__ ({ \
- (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1); })
+ (__m256)__builtin_shufflevector((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ ((imm) & 1) ? 8 : 0, \
+ ((imm) & 1) ? 9 : 1, \
+ ((imm) & 1) ? 10 : 2, \
+ ((imm) & 1) ? 11 : 3, \
+ ((imm) & 1) ? 12 : 4, \
+ ((imm) & 1) ? 13 : 5, \
+ ((imm) & 1) ? 14 : 6, \
+ ((imm) & 1) ? 15 : 7); })
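Only bit 0 of the immediate is meaningful here: it selects the low (0) or high (1) 256-bit half, which is exactly what the shuffle indices encode. Usage sketch, assuming -mavx512dq:

#include <immintrin.h>

void split_ps(__m512 v, __m256 *lo, __m256 *hi) {
  *lo = _mm512_extractf32x8_ps(v, 0); /* elements 0..7  */
  *hi = _mm512_extractf32x8_ps(v, 1); /* elements 8..15 */
}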
#define _mm512_mask_extractf32x8_ps(W, U, A, imm) __extension__ ({ \
- (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)(__m256)(W), \
- (__mmask8)(U)); })
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm512_extractf32x8_ps((A), (imm)), \
+ (__v8sf)(W)); })
#define _mm512_maskz_extractf32x8_ps(U, A, imm) __extension__ ({ \
- (__m256)__builtin_ia32_extractf32x8_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U)); })
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm512_extractf32x8_ps((A), (imm)), \
+ (__v8sf)_mm256_setzero_ps()); })
#define _mm512_extractf64x2_pd(A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1); })
+ (__m128d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ 0 + ((imm) & 0x3) * 2, \
+ 1 + ((imm) & 0x3) * 2); })
#define _mm512_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U)); })
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm512_extractf64x2_pd((A), (imm)), \
+ (__v2df)(W)); })
#define _mm512_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_extractf64x2_512_mask((__v8df)(__m512d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U)); })
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm512_extractf64x2_pd((A), (imm)), \
+ (__v2df)_mm_setzero_pd()); })
#define _mm512_extracti32x8_epi32(A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1); })
+ (__m256i)__builtin_shufflevector((__v16si)(__m512i)(A), \
+ (__v16si)_mm512_undefined_epi32(), \
+ ((imm) & 1) ? 8 : 0, \
+ ((imm) & 1) ? 9 : 1, \
+ ((imm) & 1) ? 10 : 2, \
+ ((imm) & 1) ? 11 : 3, \
+ ((imm) & 1) ? 12 : 4, \
+ ((imm) & 1) ? 13 : 5, \
+ ((imm) & 1) ? 14 : 6, \
+ ((imm) & 1) ? 15 : 7); })
#define _mm512_mask_extracti32x8_epi32(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm512_extracti32x8_epi32((A), (imm)), \
+ (__v8si)(W)); })
#define _mm512_maskz_extracti32x8_epi32(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_extracti32x8_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm512_extracti32x8_epi32((A), (imm)), \
+ (__v8si)_mm256_setzero_si256()); })
#define _mm512_extracti64x2_epi64(A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)-1); })
+ (__m128i)__builtin_shufflevector((__v8di)(__m512i)(A), \
+ (__v8di)_mm512_undefined_epi32(), \
+ 0 + ((imm) & 0x3) * 2, \
+ 1 + ((imm) & 0x3) * 2); })
#define _mm512_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm512_extracti64x2_epi64((A), (imm)), \
+ (__v2di)(W)); })
#define _mm512_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti64x2_512_mask((__v8di)(__m512i)(A), \
- (int)(imm), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm512_extracti64x2_epi64((A), (imm)), \
+ (__v2di)_mm_setzero_di()); })
#define _mm512_insertf32x8(A, B, imm) __extension__ ({ \
- (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
- (__v8sf)(__m256)(B), (int)(imm), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)-1); })
+ (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_castps256_ps512((__m256)(B)),\
+ ((imm) & 0x1) ? 0 : 16, \
+ ((imm) & 0x1) ? 1 : 17, \
+ ((imm) & 0x1) ? 2 : 18, \
+ ((imm) & 0x1) ? 3 : 19, \
+ ((imm) & 0x1) ? 4 : 20, \
+ ((imm) & 0x1) ? 5 : 21, \
+ ((imm) & 0x1) ? 6 : 22, \
+ ((imm) & 0x1) ? 7 : 23, \
+ ((imm) & 0x1) ? 16 : 8, \
+ ((imm) & 0x1) ? 17 : 9, \
+ ((imm) & 0x1) ? 18 : 10, \
+ ((imm) & 0x1) ? 19 : 11, \
+ ((imm) & 0x1) ? 20 : 12, \
+ ((imm) & 0x1) ? 21 : 13, \
+ ((imm) & 0x1) ? 22 : 14, \
+ ((imm) & 0x1) ? 23 : 15); })
#define _mm512_mask_insertf32x8(W, U, A, B, imm) __extension__ ({ \
- (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
- (__v8sf)(__m256)(B), (int)(imm), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U)); })
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
+ (__v16sf)(W)); })
#define _mm512_maskz_insertf32x8(U, A, B, imm) __extension__ ({ \
- (__m512)__builtin_ia32_insertf32x8_mask((__v16sf)(__m512)(A), \
- (__v8sf)(__m256)(B), (int)(imm), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U)); })
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x8((A), (B), (imm)), \
+ (__v16sf)_mm512_setzero_ps()); })
#define _mm512_insertf64x2(A, B, imm) __extension__ ({ \
- (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(imm), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)-1); })
+ (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_castpd128_pd512((__m128d)(B)),\
+ (((imm) & 0x3) == 0) ? 8 : 0, \
+ (((imm) & 0x3) == 0) ? 9 : 1, \
+ (((imm) & 0x3) == 1) ? 8 : 2, \
+ (((imm) & 0x3) == 1) ? 9 : 3, \
+ (((imm) & 0x3) == 2) ? 8 : 4, \
+ (((imm) & 0x3) == 2) ? 9 : 5, \
+ (((imm) & 0x3) == 3) ? 8 : 6, \
+ (((imm) & 0x3) == 3) ? 9 : 7); })
#define _mm512_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
- (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(imm), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U)); })
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x2((A), (B), (imm)), \
+ (__v8df)(W)); })
#define _mm512_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
- (__m512d)__builtin_ia32_insertf64x2_512_mask((__v8df)(__m512d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(imm), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U)); })
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x2((A), (B), (imm)), \
+ (__v8df)_mm512_setzero_pd()); })
#define _mm512_inserti32x8(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
- (__v8si)(__m256i)(B), (int)(imm), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
+ (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
+ (__v16si)_mm512_castsi256_si512((__m256i)(B)),\
+ ((imm) & 0x1) ? 0 : 16, \
+ ((imm) & 0x1) ? 1 : 17, \
+ ((imm) & 0x1) ? 2 : 18, \
+ ((imm) & 0x1) ? 3 : 19, \
+ ((imm) & 0x1) ? 4 : 20, \
+ ((imm) & 0x1) ? 5 : 21, \
+ ((imm) & 0x1) ? 6 : 22, \
+ ((imm) & 0x1) ? 7 : 23, \
+ ((imm) & 0x1) ? 16 : 8, \
+ ((imm) & 0x1) ? 17 : 9, \
+ ((imm) & 0x1) ? 18 : 10, \
+ ((imm) & 0x1) ? 19 : 11, \
+ ((imm) & 0x1) ? 20 : 12, \
+ ((imm) & 0x1) ? 21 : 13, \
+ ((imm) & 0x1) ? 22 : 14, \
+ ((imm) & 0x1) ? 23 : 15); })
#define _mm512_mask_inserti32x8(W, U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
- (__v8si)(__m256i)(B), (int)(imm), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x8((A), (B), (imm)), \
+ (__v16si)(W)); })
#define _mm512_maskz_inserti32x8(U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti32x8_mask((__v16si)(__m512i)(A), \
- (__v8si)(__m256i)(B), (int)(imm), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x8((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()); })
#define _mm512_inserti64x2(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
- (__v2di)(__m128i)(B), \
- (int)(imm), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
+ (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
+ (__v8di)_mm512_castsi128_si512((__m128i)(B)),\
+ (((imm) & 0x3) == 0) ? 8 : 0, \
+ (((imm) & 0x3) == 0) ? 9 : 1, \
+ (((imm) & 0x3) == 1) ? 8 : 2, \
+ (((imm) & 0x3) == 1) ? 9 : 3, \
+ (((imm) & 0x3) == 2) ? 8 : 4, \
+ (((imm) & 0x3) == 2) ? 9 : 5, \
+ (((imm) & 0x3) == 3) ? 8 : 6, \
+ (((imm) & 0x3) == 3) ? 9 : 7); })
#define _mm512_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
- (__v2di)(__m128i)(B), \
- (int)(imm), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U)); })
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x2((A), (B), (imm)), \
+ (__v8di)(W)); })
#define _mm512_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti64x2_512_mask((__v8di)(__m512i)(A), \
- (__v2di)(__m128i)(B), \
- (int)(imm), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x2((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()); })
#define _mm512_mask_fpclass_ps_mask(U, A, imm) __extension__ ({ \
(__mmask16)__builtin_ia32_fpclassps512_mask((__v16sf)(__m512)(A), \
diff --git a/lib/Headers/avx512fintrin.h b/lib/Headers/avx512fintrin.h
index 0bf6582345d4..e6a7217c8967 100644
--- a/lib/Headers/avx512fintrin.h
+++ b/lib/Headers/avx512fintrin.h
@@ -54,6 +54,19 @@ typedef unsigned short __mmask16;
#define _MM_FROUND_TO_ZERO 0x03
#define _MM_FROUND_CUR_DIRECTION 0x04
+/* Constants for integer comparison predicates */
+typedef enum {
+ _MM_CMPINT_EQ, /* Equal */
+ _MM_CMPINT_LT, /* Less than */
+ _MM_CMPINT_LE, /* Less than or Equal */
+ _MM_CMPINT_UNUSED,
+ _MM_CMPINT_NE, /* Not Equal */
+ _MM_CMPINT_NLT, /* Not Less than */
+#define _MM_CMPINT_GE _MM_CMPINT_NLT /* Greater than or Equal */
+ _MM_CMPINT_NLE /* Not Less than or Equal */
+#define _MM_CMPINT_GT _MM_CMPINT_NLE /* Greater than */
+} _MM_CMPINT_ENUM;
+
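These predicates feed the integer _mm512_cmp_*_mask forms; GE and GT are spelled as the complements NLT and NLE, hence the #define aliases inside the enum. Usage sketch, assuming -mavx512f:

#include <immintrin.h>

/* Count the lanes of a that are (signed) less than the matching lane of b. */
int count_lt(__m512i a, __m512i b) {
  __mmask16 k = _mm512_cmp_epi32_mask(a, b, _MM_CMPINT_LT);
  return __builtin_popcount((unsigned)k);
}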
typedef enum
{
_MM_PERM_AAAA = 0x00, _MM_PERM_AAAB = 0x01, _MM_PERM_AAAC = 0x02,
@@ -503,6 +516,18 @@ _mm512_castsi512_si256 (__m512i __A)
return (__m256i)__builtin_shufflevector(__A, __A , 0, 1, 2, 3);
}
+static __inline__ __mmask16 __DEFAULT_FN_ATTRS
+_mm512_int2mask(int __a)
+{
+ return (__mmask16)__a;
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_mask2int(__mmask16 __a)
+{
+ return (int)__a;
+}
+
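_mm512_int2mask and _mm512_mask2int are thin casts, apparently provided for source compatibility with other compilers' AVX-512 headers; a __mmask16 is just a 16-bit integer underneath. Sketch:

#include <immintrin.h>

/* Take b in lanes 0..7 and a in lanes 8..15. */
__m512i blend_low8(__m512i a, __m512i b) {
  __mmask16 k = _mm512_int2mask(0x00FF);
  return _mm512_mask_blend_epi32(k, a, b); /* b where k is set, else a */
}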
/* Bitwise operators */
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_and_epi32(__m512i __a, __m512i __b)
@@ -737,22 +762,19 @@ _mm512_add_epi64 (__m512i __A, __m512i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_add_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+_mm512_mask_add_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_add_epi64(__A, __B),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_add_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+_mm512_maskz_add_epi64(__mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_add_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -762,22 +784,19 @@ _mm512_sub_epi64 (__m512i __A, __m512i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sub_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
+_mm512_mask_sub_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sub_epi64(__A, __B),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sub_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
+_mm512_maskz_sub_epi64(__mmask8 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubq512_mask ((__v8di) __A,
- (__v8di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sub_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -787,22 +806,19 @@ _mm512_add_epi32 (__m512i __A, __m512i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_add_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+_mm512_mask_add_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_add_epi32(__A, __B),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_maskz_add_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_paddd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_add_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -812,22 +828,19 @@ _mm512_sub_epi32 (__m512i __A, __m512i __B)
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sub_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
+_mm512_mask_sub_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sub_epi32(__A, __B),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sub_epi32 (__mmask16 __U, __m512i __A, __m512i __B)
+_mm512_maskz_sub_epi32(__mmask16 __U, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_psubd512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sub_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
#define _mm512_mask_max_round_pd(W, U, A, B, R) __extension__ ({ \
@@ -1403,57 +1416,45 @@ _mm512_maskz_min_epu64 (__mmask8 __M, __m512i __A, __m512i __B)
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_mul_epi32(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_pmuldq512((__v16si)__X, (__v16si)__Y);
}
static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mul_epi32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+_mm512_mask_mul_epi32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v8di) __W, __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_mul_epi32(__X, __Y),
+ (__v8di)__W);
}
static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_mul_epi32 (__mmask8 __M, __m512i __X, __m512i __Y)
+_mm512_maskz_mul_epi32(__mmask8 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_pmuldq512_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_mul_epi32(__X, __Y),
+ (__v8di)_mm512_setzero_si512());
}
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_mul_epu32(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_pmuludq512((__v16si)__X, (__v16si)__Y);
}
static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mul_epu32 (__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
+_mm512_mask_mul_epu32(__m512i __W, __mmask8 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v8di) __W, __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_mul_epu32(__X, __Y),
+ (__v8di)__W);
}
static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_mul_epu32 (__mmask8 __M, __m512i __X, __m512i __Y)
+_mm512_maskz_mul_epu32(__mmask8 __M, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_pmuludq512_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__M,
+ (__v8di)_mm512_mul_epu32(__X, __Y),
+ (__v8di)_mm512_setzero_si512());
}
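mul_epi32 and mul_epu32 are widening multiplies: each 64-bit result lane is the product of the low 32-bit halves of the corresponding source qwords (VPMULDQ/VPMULUDQ); the odd 32-bit elements do not participate. Scalar model of one signed lane:

#include <stdint.h>

static inline int64_t mul_epi32_lane(int64_t x, int64_t y) {
  return (int64_t)(int32_t)x * (int32_t)y; /* sign-extend the low halves */
}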
static __inline __m512i __DEFAULT_FN_ATTRS
@@ -1463,21 +1464,19 @@ _mm512_mullo_epi32 (__m512i __A, __m512i __B)
}
static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_mullo_epi32 (__mmask16 __M, __m512i __A, __m512i __B)
+_mm512_maskz_mullo_epi32(__mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_mullo_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_mullo_epi32 (__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
+_mm512_mask_mullo_epi32(__m512i __W, __mmask16 __M, __m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_pmulld512_mask ((__v16si) __A,
- (__v16si) __B,
- (__v16si) __W, __M);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__M,
+ (__v16si)_mm512_mullo_epi32(__A, __B),
+ (__v16si)__W);
}
#define _mm512_mask_sqrt_round_pd(W, U, A, R) __extension__ ({ \
@@ -1977,38 +1976,30 @@ _mm_maskz_add_sd(__mmask8 __U,__m128d __A, __m128d __B) {
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_add_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_add_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_add_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_addpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_add_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_add_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_add_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_add_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_addps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_add_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
#define _mm512_add_round_pd(A, B, R) __extension__ ({ \
@@ -2120,40 +2111,30 @@ _mm_maskz_sub_sd(__mmask8 __U,__m128d __A, __m128d __B) {
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_sub_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_sub_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_sub_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_subpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_sub_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_sub_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_sub_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_sub_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_subps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_sub_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
#define _mm512_sub_round_pd(A, B, R) __extension__ ({ \
@@ -2265,40 +2246,30 @@ _mm_maskz_mul_sd(__mmask8 __U,__m128d __A, __m128d __B) {
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_mul_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_mul_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_mul_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_mulpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_mul_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_mul_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_mul_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_mul_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_mulps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_mul_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
#define _mm512_mul_round_pd(A, B, R) __extension__ ({ \
@@ -2417,21 +2388,16 @@ _mm512_div_pd(__m512d __a, __m512d __b)
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_div_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df) __W,
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_div_pd(__A, __B),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_div_pd(__mmask8 __U, __m512d __A, __m512d __B) {
- return (__m512d) __builtin_ia32_divpd512_mask ((__v8df) __A,
- (__v8df) __B,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_div_pd(__A, __B),
+ (__v8df)_mm512_setzero_pd());
}
static __inline __m512 __DEFAULT_FN_ATTRS
@@ -2442,21 +2408,16 @@ _mm512_div_ps(__m512 __a, __m512 __b)
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_mask_div_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf) __W,
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_div_ps(__A, __B),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
_mm512_maskz_div_ps(__mmask16 __U, __m512 __A, __m512 __B) {
- return (__m512) __builtin_ia32_divps512_mask ((__v16sf) __A,
- (__v16sf) __B,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U,
- _MM_FROUND_CUR_DIRECTION);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_div_ps(__A, __B),
+ (__v16sf)_mm512_setzero_ps());
}
#define _mm512_div_round_pd(A, B, R) __extension__ ({ \
@@ -3443,71 +3404,94 @@ _mm512_maskz_permutex2var_epi64 (__mmask8 __U, __m512i __A,
}
#define _mm512_alignr_epi64(A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(I), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
+ (__m512i)__builtin_shufflevector((__v8di)(__m512i)(B), \
+ (__v8di)(__m512i)(A), \
+ ((int)(I) & 0x7) + 0, \
+ ((int)(I) & 0x7) + 1, \
+ ((int)(I) & 0x7) + 2, \
+ ((int)(I) & 0x7) + 3, \
+ ((int)(I) & 0x7) + 4, \
+ ((int)(I) & 0x7) + 5, \
+ ((int)(I) & 0x7) + 6, \
+ ((int)(I) & 0x7) + 7); })
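The shufflevector encodes VALIGNQ directly: B occupies indices 0..7 and A indices 8..15 of the concatenation, so lane j of the result is element (I & 7) + j, i.e. the pair shifted right by I qwords. Scalar model:

#include <stdint.h>

static void alignr_epi64_model(const int64_t A[8], const int64_t B[8],
                               int I, int64_t R[8]) {
  int64_t cat[16]; /* B in lanes 0..7, A in lanes 8..15 */
  for (int j = 0; j < 8; ++j) { cat[j] = B[j]; cat[j + 8] = A[j]; }
  for (int j = 0; j < 8; ++j)
    R[j] = cat[(I & 7) + j]; /* shift the 1024-bit pair right by I qwords */
}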
#define _mm512_mask_alignr_epi64(W, U, A, B, imm) __extension__({\
- (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(imm), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U)); })
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+ (__v8di)(__m512i)(W)); })
#define _mm512_maskz_alignr_epi64(U, A, B, imm) __extension__({\
- (__m512i)__builtin_ia32_alignq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), (int)(imm), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_alignr_epi64((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()); })
#define _mm512_alignr_epi32(A, B, I) __extension__ ({ \
- (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(I), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
+ (__m512i)__builtin_shufflevector((__v16si)(__m512i)(B), \
+ (__v16si)(__m512i)(A), \
+ ((int)(I) & 0xf) + 0, \
+ ((int)(I) & 0xf) + 1, \
+ ((int)(I) & 0xf) + 2, \
+ ((int)(I) & 0xf) + 3, \
+ ((int)(I) & 0xf) + 4, \
+ ((int)(I) & 0xf) + 5, \
+ ((int)(I) & 0xf) + 6, \
+ ((int)(I) & 0xf) + 7, \
+ ((int)(I) & 0xf) + 8, \
+ ((int)(I) & 0xf) + 9, \
+ ((int)(I) & 0xf) + 10, \
+ ((int)(I) & 0xf) + 11, \
+ ((int)(I) & 0xf) + 12, \
+ ((int)(I) & 0xf) + 13, \
+ ((int)(I) & 0xf) + 14, \
+ ((int)(I) & 0xf) + 15); })
#define _mm512_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({\
- (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(imm), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+ (__v16si)(__m512i)(W)); })
#define _mm512_maskz_alignr_epi32(U, A, B, imm) __extension__({\
- (__m512i)__builtin_ia32_alignd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), (int)(imm), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_alignr_epi32((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()); })
/* Vector Extract */
-#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(I), \
- (__v4df)_mm256_setzero_si256(), \
- (__mmask8)-1); })
+#define _mm512_extractf64x4_pd(A, I) __extension__ ({ \
+ (__m256d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_undefined_pd(), \
+ ((I) & 1) ? 4 : 0, \
+ ((I) & 1) ? 5 : 1, \
+ ((I) & 1) ? 6 : 2, \
+ ((I) & 1) ? 7 : 3); })
#define _mm512_mask_extractf64x4_pd(W, U, A, imm) __extension__ ({\
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U)); })
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm512_extractf64x4_pd((A), (imm)), \
+ (__v4df)(W)); })
#define _mm512_maskz_extractf64x4_pd(U, A, imm) __extension__ ({\
- (__m256d)__builtin_ia32_extractf64x4_mask((__v8df)(__m512d)(A), (int)(imm), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U)); })
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm512_extractf64x4_pd((A), (imm)), \
+ (__v4df)_mm256_setzero_pd()); })
-#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(I), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1); })
+#define _mm512_extractf32x4_ps(A, I) __extension__ ({ \
+ (__m128)__builtin_shufflevector((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_undefined_ps(), \
+ 0 + ((I) & 0x3) * 4, \
+ 1 + ((I) & 0x3) * 4, \
+ 2 + ((I) & 0x3) * 4, \
+ 3 + ((I) & 0x3) * 4); })
#define _mm512_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({\
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U)); })
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm512_extractf32x4_ps((A), (imm)), \
+ (__v4sf)(W)); })
#define _mm512_maskz_extractf32x4_ps(U, A, imm) __extension__ ({\
- (__m128)__builtin_ia32_extractf32x4_mask((__v16sf)(__m512)(A), (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U)); })
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm512_extractf32x4_ps((A), (imm)), \
+ (__v4sf)_mm_setzero_ps()); })
+
/* Vector Blend */
static __inline __m512d __DEFAULT_FN_ATTRS
@@ -3556,10 +3540,49 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
#define _mm512_cmp_ps_mask(A, B, P) \
_mm512_cmp_round_ps_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
#define _mm512_mask_cmp_ps_mask(U, A, B, P) \
_mm512_mask_cmp_round_ps_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+#define _mm512_cmpeq_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_EQ_OQ)
+#define _mm512_mask_cmpeq_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_EQ_OQ)
+
+#define _mm512_cmplt_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_LT_OS)
+#define _mm512_mask_cmplt_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LT_OS)
+
+#define _mm512_cmple_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_LE_OS)
+#define _mm512_mask_cmple_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_LE_OS)
+
+#define _mm512_cmpunord_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_UNORD_Q)
+#define _mm512_mask_cmpunord_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_UNORD_Q)
+
+#define _mm512_cmpneq_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_NEQ_UQ)
+#define _mm512_mask_cmpneq_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NEQ_UQ)
+
+#define _mm512_cmpnlt_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_NLT_US)
+#define _mm512_mask_cmpnlt_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLT_US)
+
+#define _mm512_cmpnle_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_NLE_US)
+#define _mm512_mask_cmpnle_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_NLE_US)
+
+#define _mm512_cmpord_ps_mask(A, B) \
+ _mm512_cmp_ps_mask((A), (B), _CMP_ORD_Q)
+#define _mm512_mask_cmpord_ps_mask(k, A, B) \
+ _mm512_mask_cmp_ps_mask((k), (A), (B), _CMP_ORD_Q)
+
#define _mm512_cmp_round_pd_mask(A, B, P, R) __extension__ ({ \
(__mmask8)__builtin_ia32_cmppd512_mask((__v8df)(__m512d)(A), \
(__v8df)(__m512d)(B), (int)(P), \
@@ -3572,10 +3595,49 @@ _mm512_mask_blend_epi32(__mmask16 __U, __m512i __A, __m512i __W)
#define _mm512_cmp_pd_mask(A, B, P) \
_mm512_cmp_round_pd_mask((A), (B), (P), _MM_FROUND_CUR_DIRECTION)
-
#define _mm512_mask_cmp_pd_mask(U, A, B, P) \
_mm512_mask_cmp_round_pd_mask((U), (A), (B), (P), _MM_FROUND_CUR_DIRECTION)
+#define _mm512_cmpeq_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_EQ_OQ)
+#define _mm512_mask_cmpeq_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_EQ_OQ)
+
+#define _mm512_cmplt_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_LT_OS)
+#define _mm512_mask_cmplt_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LT_OS)
+
+#define _mm512_cmple_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_LE_OS)
+#define _mm512_mask_cmple_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_LE_OS)
+
+#define _mm512_cmpunord_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_UNORD_Q)
+#define _mm512_mask_cmpunord_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_UNORD_Q)
+
+#define _mm512_cmpneq_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_NEQ_UQ)
+#define _mm512_mask_cmpneq_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NEQ_UQ)
+
+#define _mm512_cmpnlt_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_NLT_US)
+#define _mm512_mask_cmpnlt_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLT_US)
+
+#define _mm512_cmpnle_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_NLE_US)
+#define _mm512_mask_cmpnle_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_NLE_US)
+
+#define _mm512_cmpord_pd_mask(A, B) \
+ _mm512_cmp_pd_mask((A), (B), _CMP_ORD_Q)
+#define _mm512_mask_cmpord_pd_mask(k, A, B) \
+ _mm512_mask_cmp_pd_mask((k), (A), (B), _CMP_ORD_Q)
+
/* Conversion */
#define _mm512_cvtt_roundps_epu32(A, R) __extension__ ({ \
@@ -3682,26 +3744,35 @@ _mm512_maskz_cvtepu32_ps (__mmask16 __U, __m512i __A)
static __inline __m512d __DEFAULT_FN_ATTRS
_mm512_cvtepi32_pd(__m256i __A)
{
- return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) -1);
+ return (__m512d)__builtin_convertvector((__v8si)__A, __v8df);
}
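+/* These conversions use __builtin_convertvector, which performs an
+ element-wise sitofp/uitofp and needs no target-specific builtin. */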
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_cvtepi32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
- return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
- (__v8df) __W,
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_cvtepi32_pd(__A),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepi32_pd (__mmask8 __U, __m256i __A)
{
- return (__m512d) __builtin_ia32_cvtdq2pd512_mask ((__v8si) __A,
- (__v8df) _mm512_setzero_pd (),
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_cvtepi32_pd(__A),
+ (__v8df)_mm512_setzero_pd());
+}
+
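+/* The *lo variants convert only the low eight 32-bit elements of a
+ 512-bit source; _mm512_castsi512_si256 extracts that half at no cost. */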
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepi32lo_pd(__m512i __A)
+{
+ return (__m512d) _mm512_cvtepi32_pd(_mm512_castsi512_si256(__A));
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepi32lo_pd(__m512d __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512d) _mm512_mask_cvtepi32_pd(__W, __U, _mm512_castsi512_si256(__A));
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
@@ -3734,26 +3805,35 @@ _mm512_maskz_cvtepi32_ps (__mmask16 __U, __m512i __A)
static __inline __m512d __DEFAULT_FN_ATTRS
_mm512_cvtepu32_pd(__m256i __A)
{
- return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) -1);
+ return (__m512d)__builtin_convertvector((__v8su)__A, __v8df);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_cvtepu32_pd (__m512d __W, __mmask8 __U, __m256i __A)
{
- return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
- (__v8df) __W,
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_cvtepu32_pd(__A),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_maskz_cvtepu32_pd (__mmask8 __U, __m256i __A)
{
- return (__m512d) __builtin_ia32_cvtudq2pd512_mask ((__v8si) __A,
- (__v8df) _mm512_setzero_pd (),
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8) __U,
+ (__v8df)_mm512_cvtepu32_pd(__A),
+ (__v8df)_mm512_setzero_pd());
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtepu32lo_pd(__m512i __A)
+{
+ return (__m512d) _mm512_cvtepu32_pd(_mm512_castsi512_si256(__A));
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtepu32lo_pd(__m512d __W, __mmask8 __U, __m512i __A)
+{
+ return (__m512d) _mm512_mask_cvtepu32_pd(__W, __U, _mm512_castsi512_si256(__A));
}
#define _mm512_cvt_roundpd_ps(A, R) __extension__ ({ \
@@ -3798,6 +3878,24 @@ _mm512_maskz_cvtpd_ps (__mmask8 __U, __m512d __A)
_MM_FROUND_CUR_DIRECTION);
}
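+/* cvtpd_pslo narrows eight doubles into the low eight floats and zeroes
+ the upper half by shuffling against a zero vector. */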
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_cvtpd_pslo (__m512d __A)
+{
+ return (__m512) __builtin_shufflevector((__v8sf) _mm512_cvtpd_ps(__A),
+ (__v8sf) _mm256_setzero_ps (),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+}
+
+static __inline__ __m512 __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpd_pslo (__m512 __W, __mmask8 __U, __m512d __A)
+{
+ return (__m512) __builtin_shufflevector (
+ (__v8sf) _mm512_mask_cvtpd_ps (_mm512_castps512_ps256(__W),
+ __U, __A),
+ (__v8sf) _mm256_setzero_ps (),
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+}
+
#define _mm512_cvt_roundps_ph(A, I) __extension__ ({ \
(__m256i)__builtin_ia32_vcvtps2ph512_mask((__v16sf)(__m512)(A), (int)(I), \
(__v16hi)_mm256_undefined_si256(), \
@@ -4919,263 +5017,227 @@ _mm512_mask_cmpneq_epu64_mask(__mmask8 __u, __m512i __a, __m512i __b) {
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepi8_epi32 (__m128i __A)
+_mm512_cvtepi8_epi32(__m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ /* This function always performs a signed extension, but __v16qi is a vector
+ of char, which may be signed or unsigned, so use the explicitly signed
+ __v16qs type instead. */
+ return (__m512i)__builtin_convertvector((__v16qs)__A, __v16si);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepi8_epi32 (__m512i __W, __mmask16 __U, __m128i __A)
+_mm512_mask_cvtepi8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepi8_epi32(__A),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepi8_epi32 (__mmask16 __U, __m128i __A)
+_mm512_maskz_cvtepi8_epi32(__mmask16 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbd512_mask ((__v16qi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepi8_epi32(__A),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepi8_epi64 (__m128i __A)
+_mm512_cvtepi8_epi64(__m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ /* This function always performs a signed extension, but __v16qi is a vector
+ of char, which may be signed or unsigned, so use the explicitly signed
+ __v16qs type instead. */
+ return (__m512i)__builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__A, (__v16qs)__A,
+ 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
}
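+/* __builtin_convertvector requires equal element counts, so the low
+ eight bytes are first split out with a shuffle before widening. */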
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepi8_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+_mm512_mask_cvtepi8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepi8_epi64(__A),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+_mm512_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxbq512_mask ((__v16qi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepi8_epi64(__A),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepi32_epi64 (__m256i __X)
+_mm512_cvtepi32_epi64(__m256i __X)
{
- return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_convertvector((__v8si)__X, __v8di);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepi32_epi64 (__m512i __W, __mmask8 __U, __m256i __X)
+_mm512_mask_cvtepi32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
{
- return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepi32_epi64(__X),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepi32_epi64 (__mmask8 __U, __m256i __X)
+_mm512_maskz_cvtepi32_epi64(__mmask8 __U, __m256i __X)
{
- return (__m512i) __builtin_ia32_pmovsxdq512_mask ((__v8si) __X,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepi32_epi64(__X),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepi16_epi32 (__m256i __A)
+_mm512_cvtepi16_epi32(__m256i __A)
{
- return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_convertvector((__v16hi)__A, __v16si);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepi16_epi32 (__m512i __W, __mmask16 __U, __m256i __A)
+_mm512_mask_cvtepi16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepi16_epi32(__A),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepi16_epi32 (__mmask16 __U, __m256i __A)
+_mm512_maskz_cvtepi16_epi32(__mmask16 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovsxwd512_mask ((__v16hi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepi16_epi32(__A),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepi16_epi64 (__m128i __A)
+_mm512_cvtepi16_epi64(__m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_convertvector((__v8hi)__A, __v8di);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepi16_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+_mm512_mask_cvtepi16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepi16_epi64(__A),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+_mm512_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovsxwq512_mask ((__v8hi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepi16_epi64(__A),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepu8_epi32 (__m128i __A)
+_mm512_cvtepu8_epi32(__m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_convertvector((__v16qu)__A, __v16si);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepu8_epi32 (__m512i __W, __mmask16 __U, __m128i __A)
+_mm512_mask_cvtepu8_epi32(__m512i __W, __mmask16 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepu8_epi32(__A),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepu8_epi32 (__mmask16 __U, __m128i __A)
+_mm512_maskz_cvtepu8_epi32(__mmask16 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbd512_mask ((__v16qi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepu8_epi32(__A),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepu8_epi64 (__m128i __A)
+_mm512_cvtepu8_epi64(__m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__A, (__v16qu)__A,
+ 0, 1, 2, 3, 4, 5, 6, 7), __v8di);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepu8_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+_mm512_mask_cvtepu8_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepu8_epi64(__A),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+_mm512_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxbq512_mask ((__v16qi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepu8_epi64(__A),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepu32_epi64 (__m256i __X)
+_mm512_cvtepu32_epi64(__m256i __X)
{
- return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_convertvector((__v8su)__X, __v8di);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepu32_epi64 (__m512i __W, __mmask8 __U, __m256i __X)
+_mm512_mask_cvtepu32_epi64(__m512i __W, __mmask8 __U, __m256i __X)
{
- return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepu32_epi64(__X),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepu32_epi64 (__mmask8 __U, __m256i __X)
+_mm512_maskz_cvtepu32_epi64(__mmask8 __U, __m256i __X)
{
- return (__m512i) __builtin_ia32_pmovzxdq512_mask ((__v8si) __X,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepu32_epi64(__X),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepu16_epi32 (__m256i __A)
+_mm512_cvtepu16_epi32(__m256i __A)
{
- return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_convertvector((__v16hu)__A, __v16si);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepu16_epi32 (__m512i __W, __mmask16 __U, __m256i __A)
+_mm512_mask_cvtepu16_epi32(__m512i __W, __mmask16 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepu16_epi32(__A),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepu16_epi32 (__mmask16 __U, __m256i __A)
+_mm512_maskz_cvtepu16_epi32(__mmask16 __U, __m256i __A)
{
- return (__m512i) __builtin_ia32_pmovzxwd512_mask ((__v16hi) __A,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_cvtepu16_epi32(__A),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_cvtepu16_epi64 (__m128i __A)
+_mm512_cvtepu16_epi64(__m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_convertvector((__v8hu)__A, __v8di);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_cvtepu16_epi64 (__m512i __W, __mmask8 __U, __m128i __A)
+_mm512_mask_cvtepu16_epi64(__m512i __W, __mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepu16_epi64(__A),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+_mm512_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
{
- return (__m512i) __builtin_ia32_pmovzxwq512_mask ((__v8hi) __A,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_cvtepu16_epi64(__A),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
@@ -5393,67 +5455,91 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
(__v8di)_mm512_setzero_si512(), \
(__mmask8)(U)); })
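+/* Immediate shifts follow the hardware behavior: logical shift counts of
+ the element width or more yield zero, while arithmetic shifts fill with
+ the sign bit. */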
-#define _mm512_slli_epi32(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
-
-#define _mm512_mask_slli_epi32(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
-
-#define _mm512_maskz_slli_epi32(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_pslldi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_slli_epi32(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
+}
-#define _mm512_slli_epi64(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_slli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_slli_epi32(__A, __B),
+ (__v16si)__W);
+}
-#define _mm512_mask_slli_epi64(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_slli_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
+}
-#define _mm512_maskz_slli_epi64(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psllqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_slli_epi64(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_slli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_slli_epi64(__A, __B),
+ (__v8di)__W);
+}
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_slli_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
+}
-#define _mm512_srli_epi32(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srli_epi32(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
+}
-#define _mm512_mask_srli_epi32(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srli_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srli_epi32(__A, __B),
+ (__v16si)__W);
+}
-#define _mm512_maskz_srli_epi32(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrldi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srli_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
+}
-#define _mm512_srli_epi64(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srli_epi64(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
+}
-#define _mm512_mask_srli_epi64(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srli_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srli_epi64(__A, __B),
+ (__v8di)__W);
+}
-#define _mm512_maskz_srli_epi64(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psrlqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srli_epi64(__mmask8 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srli_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
+}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_load_epi32 (__m512i __W, __mmask16 __U, void const *__P)
@@ -5911,8 +5997,10 @@ _mm512_kmov (__mmask16 __A)
(int)__builtin_ia32_vcomiss((__v4sf)(__m128)(A), (__v4sf)(__m128)(B), \
(int)(P), (int)(R)); })
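+/* Conversions involving 64-bit scalars use the REX.W instruction forms,
+ which exist only in 64-bit mode. */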
+#ifdef __x86_64__
#define _mm_cvt_roundsd_si64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#endif
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I,
@@ -5926,351 +6014,267 @@ _mm512_mask2_permutex2var_epi32 (__m512i __A, __m512i __I,
}
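+/* sll/srl/sra shift every element by the scalar count in the low 64 bits
+ of __B; the sllv/srlv/srav forms take a per-element count vector. */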
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sll_epi32 (__m512i __A, __m128i __B)
+_mm512_sll_epi32(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_pslld512((__v16si) __A, (__v4si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sll_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+_mm512_mask_sll_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sll_epi32(__A, __B),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sll_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+_mm512_maskz_sll_epi32(__mmask16 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_pslld512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sll_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sll_epi64 (__m512i __A, __m128i __B)
+_mm512_sll_epi64(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_psllq512((__v8di)__A, (__v2di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sll_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+_mm512_mask_sll_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sll_epi64(__A, __B),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sll_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+_mm512_maskz_sll_epi64(__mmask8 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psllq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sll_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sllv_epi32 (__m512i __X, __m512i __Y)
+_mm512_sllv_epi32(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_psllv16si((__v16si)__X, (__v16si)__Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sllv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+_mm512_mask_sllv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sllv_epi32(__X, __Y),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sllv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+_mm512_maskz_sllv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psllv16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sllv_epi32(__X, __Y),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sllv_epi64 (__m512i __X, __m512i __Y)
+_mm512_sllv_epi64(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di)
- _mm512_undefined_pd (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_psllv8di((__v8di)__X, (__v8di)__Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sllv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+_mm512_mask_sllv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sllv_epi64(__X, __Y),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sllv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+_mm512_maskz_sllv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psllv8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sllv_epi64(__X, __Y),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sra_epi32 (__m512i __A, __m128i __B)
+_mm512_sra_epi32(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_psrad512((__v16si) __A, (__v4si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sra_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+_mm512_mask_sra_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sra_epi32(__A, __B),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sra_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+_mm512_maskz_sra_epi32(__mmask16 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrad512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_sra_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_sra_epi64 (__m512i __A, __m128i __B)
+_mm512_sra_epi64(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_psraq512((__v8di)__A, (__v2di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_sra_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+_mm512_mask_sra_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sra_epi64(__A, __B),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_sra_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+_mm512_maskz_sra_epi64(__mmask8 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psraq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_sra_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srav_epi32 (__m512i __X, __m512i __Y)
+_mm512_srav_epi32(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_psrav16si((__v16si)__X, (__v16si)__Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srav_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+_mm512_mask_srav_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srav_epi32(__X, __Y),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srav_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+_mm512_maskz_srav_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrav16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srav_epi32(__X, __Y),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srav_epi64 (__m512i __X, __m512i __Y)
+_mm512_srav_epi64(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_psrav8di((__v8di)__X, (__v8di)__Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srav_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+_mm512_mask_srav_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srav_epi64(__X, __Y),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srav_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+_mm512_maskz_srav_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrav8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srav_epi64(__X, __Y),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srl_epi32 (__m512i __A, __m128i __B)
+_mm512_srl_epi32(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_psrld512((__v16si) __A, (__v4si)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srl_epi32 (__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
+_mm512_mask_srl_epi32(__m512i __W, __mmask16 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srl_epi32(__A, __B),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srl_epi32 (__mmask16 __U, __m512i __A, __m128i __B)
+_mm512_maskz_srl_epi32(__mmask16 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrld512_mask ((__v16si) __A,
- (__v4si) __B,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srl_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srl_epi64 (__m512i __A, __m128i __B)
+_mm512_srl_epi64(__m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_psrlq512((__v8di)__A, (__v2di)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srl_epi64 (__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
+_mm512_mask_srl_epi64(__m512i __W, __mmask8 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srl_epi64(__A, __B),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srl_epi64 (__mmask8 __U, __m512i __A, __m128i __B)
+_mm512_maskz_srl_epi64(__mmask8 __U, __m512i __A, __m128i __B)
{
- return (__m512i) __builtin_ia32_psrlq512_mask ((__v8di) __A,
- (__v2di) __B,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srl_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_srlv_epi32 (__m512i __X, __m512i __Y)
+_mm512_srlv_epi32(__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) -1);
+ return (__m512i)__builtin_ia32_psrlv16si((__v16si)__X, (__v16si)__Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srlv_epi32 (__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
+_mm512_mask_srlv_epi32(__m512i __W, __mmask16 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si) __W,
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srlv_epi32(__X, __Y),
+ (__v16si)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srlv_epi32 (__mmask16 __U, __m512i __X, __m512i __Y)
+_mm512_maskz_srlv_epi32(__mmask16 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrlv16si_mask ((__v16si) __X,
- (__v16si) __Y,
- (__v16si)
- _mm512_setzero_si512 (),
- (__mmask16) __U);
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srlv_epi32(__X, __Y),
+ (__v16si)_mm512_setzero_si512());
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_srlv_epi64 (__m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) -1);
+ return (__m512i)__builtin_ia32_psrlv8di((__v8di)__X, (__v8di)__Y);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_mask_srlv_epi64 (__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
+_mm512_mask_srlv_epi64(__m512i __W, __mmask8 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di) __W,
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srlv_epi64(__X, __Y),
+ (__v8di)__W);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS
-_mm512_maskz_srlv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
+_mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
{
- return (__m512i) __builtin_ia32_psrlv8di_mask ((__v8di) __X,
- (__v8di) __Y,
- (__v8di)
- _mm512_setzero_si512 (),
- (__mmask8) __U);
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srlv_epi64(__X, __Y),
+ (__v8di)_mm512_setzero_si512());
}
#define _mm512_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
@@ -6309,8 +6313,10 @@ _mm512_maskz_srlv_epi64 (__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)(__m512i)(C), (int)(imm), \
(__mmask8)(U)); })
+#ifdef __x86_64__
#define _mm_cvt_roundsd_i64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtsd2si64((__v2df)(__m128d)(A), (int)(R)); })
+#endif
#define _mm_cvt_roundsd_si32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvtsd2si32((__v2df)(__m128d)(A), (int)(R)); })
@@ -6328,6 +6334,7 @@ _mm_cvtsd_u32 (__m128d __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvt_roundsd_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvtsd2usi64((__v2df)(__m128d)(A), \
(int)(R)); })
@@ -6339,6 +6346,7 @@ _mm_cvtsd_u64 (__m128d __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvt_roundss_si32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6346,11 +6354,13 @@ _mm_cvtsd_u64 (__m128d __A)
#define _mm_cvt_roundss_i32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvtss2si32((__v4sf)(__m128)(A), (int)(R)); })
+#ifdef __x86_64__
#define _mm_cvt_roundss_si64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
#define _mm_cvt_roundss_i64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvtss2si64((__v4sf)(__m128)(A), (int)(R)); })
+#endif
#define _mm_cvt_roundss_u32(A, R) __extension__ ({ \
(unsigned int)__builtin_ia32_vcvtss2usi32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6362,6 +6372,7 @@ _mm_cvtss_u32 (__m128 __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvt_roundss_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvtss2usi64((__v4sf)(__m128)(A), \
(int)(R)); })
@@ -6373,6 +6384,7 @@ _mm_cvtss_u64 (__m128 __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundsd_i32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvttsd2si32((__v2df)(__m128d)(A), (int)(R)); })
@@ -6387,6 +6399,7 @@ _mm_cvttsd_i32 (__m128d __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundsd_si64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvttsd2si64((__v2df)(__m128d)(A), (int)(R)); })
@@ -6399,6 +6412,7 @@ _mm_cvttsd_i64 (__m128d __A)
return (long long) __builtin_ia32_vcvttsd2si64 ((__v2df) __A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundsd_u32(A, R) __extension__ ({ \
(unsigned int)__builtin_ia32_vcvttsd2usi32((__v2df)(__m128d)(A), (int)(R)); })
@@ -6410,6 +6424,7 @@ _mm_cvttsd_u32 (__m128d __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundsd_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvttsd2usi64((__v2df)(__m128d)(A), \
(int)(R)); })
@@ -6421,6 +6436,7 @@ _mm_cvttsd_u64 (__m128d __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundss_i32(A, R) __extension__ ({ \
(int)__builtin_ia32_vcvttss2si32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6435,6 +6451,7 @@ _mm_cvttss_i32 (__m128 __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundss_i64(A, R) __extension__ ({ \
(long long)__builtin_ia32_vcvttss2si64((__v4sf)(__m128)(A), (int)(R)); })
@@ -6447,6 +6464,7 @@ _mm_cvttss_i64 (__m128 __A)
return (long long) __builtin_ia32_vcvttss2si64 ((__v4sf) __A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvtt_roundss_u32(A, R) __extension__ ({ \
(unsigned int)__builtin_ia32_vcvttss2usi32((__v4sf)(__m128)(A), (int)(R)); })
@@ -6458,6 +6476,7 @@ _mm_cvttss_u32 (__m128 __A)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvtt_roundss_u64(A, R) __extension__ ({ \
(unsigned long long)__builtin_ia32_vcvttss2usi64((__v4sf)(__m128)(A), \
(int)(R)); })
@@ -6469,6 +6488,7 @@ _mm_cvttss_u64 (__m128 __A)
__A,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask2_permutex2var_pd (__m512d __A, __m512i __I, __mmask8 __U,
@@ -6556,61 +6576,47 @@ _mm512_mask2_permutex2var_epi64 (__m512i __A, __m512i __I,
(__v16sf)_mm512_setzero_ps()); })
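+/* vpermilvar permutes within 128-bit lanes only: each control element
+ picks a source element from its own lane. */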
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_permutevar_pd (__m512d __A, __m512i __C)
+_mm512_permutevar_pd(__m512d __A, __m512i __C)
{
- return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
- (__v8di) __C,
- (__v8df)
- _mm512_undefined_pd (),
- (__mmask8) -1);
+ return (__m512d)__builtin_ia32_vpermilvarpd512((__v8df)__A, (__v8di)__C);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask_permutevar_pd (__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
+_mm512_mask_permutevar_pd(__m512d __W, __mmask8 __U, __m512d __A, __m512i __C)
{
- return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
- (__v8di) __C,
- (__v8df) __W,
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_permutevar_pd(__A, __C),
+ (__v8df)__W);
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_maskz_permutevar_pd (__mmask8 __U, __m512d __A, __m512i __C)
+_mm512_maskz_permutevar_pd(__mmask8 __U, __m512d __A, __m512i __C)
{
- return (__m512d) __builtin_ia32_vpermilvarpd512_mask ((__v8df) __A,
- (__v8di) __C,
- (__v8df)
- _mm512_setzero_pd (),
- (__mmask8) __U);
+ return (__m512d)__builtin_ia32_selectpd_512((__mmask8)__U,
+ (__v8df)_mm512_permutevar_pd(__A, __C),
+ (__v8df)_mm512_setzero_pd());
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_permutevar_ps (__m512 __A, __m512i __C)
+_mm512_permutevar_ps(__m512 __A, __m512i __C)
{
- return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
- (__v16si) __C,
- (__v16sf)
- _mm512_undefined_ps (),
- (__mmask16) -1);
+ return (__m512)__builtin_ia32_vpermilvarps512((__v16sf)__A, (__v16si)__C);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask_permutevar_ps (__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
+_mm512_mask_permutevar_ps(__m512 __W, __mmask16 __U, __m512 __A, __m512i __C)
{
- return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
- (__v16si) __C,
- (__v16sf) __W,
- (__mmask16) __U);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_permutevar_ps(__A, __C),
+ (__v16sf)__W);
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_maskz_permutevar_ps (__mmask16 __U, __m512 __A, __m512i __C)
+_mm512_maskz_permutevar_ps(__mmask16 __U, __m512 __A, __m512i __C)
{
- return (__m512) __builtin_ia32_vpermilvarps512_mask ((__v16sf) __A,
- (__v16si) __C,
- (__v16sf)
- _mm512_setzero_ps (),
- (__mmask16) __U);
+ return (__m512)__builtin_ia32_selectps_512((__mmask16)__U,
+ (__v16sf)_mm512_permutevar_ps(__A, __C),
+ (__v16sf)_mm512_setzero_ps());
}
static __inline __m512d __DEFAULT_FN_ATTRS
@@ -7028,35 +7034,48 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
(__mmask8)(U), \
_MM_FROUND_CUR_DIRECTION); })
-#define _mm512_srai_epi32(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srai_epi32(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
+}
-#define _mm512_mask_srai_epi32(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srai_epi32(__m512i __W, __mmask16 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srai_epi32(__A, __B),
+ (__v16si)__W);
+}
-#define _mm512_maskz_srai_epi32(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psradi512_mask((__v16si)(__m512i)(A), (int)(B), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectd_512((__mmask16)__U,
+ (__v16si)_mm512_srai_epi32(__A, __B),
+ (__v16si)_mm512_setzero_si512());
+}
-#define _mm512_srai_epi64(A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_srai_epi64(__m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
+}
-#define _mm512_mask_srai_epi64(W, U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_mask_srai_epi64(__m512i __W, __mmask8 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srai_epi64(__A, __B),
+ (__v8di)__W);
+}
-#define _mm512_maskz_srai_epi64(U, A, B) __extension__ ({ \
- (__m512i)__builtin_ia32_psraqi512_mask((__v8di)(__m512i)(A), (int)(B), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
+static __inline__ __m512i __DEFAULT_FN_ATTRS
+_mm512_maskz_srai_epi64(__mmask8 __U, __m512i __A, int __B)
+{
+ return (__m512i)__builtin_ia32_selectq_512((__mmask8)__U,
+ (__v8di)_mm512_srai_epi64(__A, __B),
+ (__v8di)_mm512_setzero_si512());
+}
#define _mm512_shuffle_f32x4(A, B, imm) __extension__ ({ \
(__m512)__builtin_ia32_shuf_f32x4_mask((__v16sf)(__m512)(A), \
@@ -7832,107 +7851,145 @@ _mm512_mask_cvtepi64_storeu_epi16 (void *__P, __mmask8 __M, __m512i __A)
__builtin_ia32_pmovqw512mem_mask ((__v8hi *) __P, (__v8di) __A, __M);
}
-#define _mm512_extracti32x4_epi32(A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1); })
+#define _mm512_extracti32x4_epi32(A, imm) __extension__ ({ \
+ (__m128i)__builtin_shufflevector((__v16si)(__m512i)(A), \
+ (__v16si)_mm512_undefined_epi32(), \
+ 0 + ((imm) & 0x3) * 4, \
+ 1 + ((imm) & 0x3) * 4, \
+ 2 + ((imm) & 0x3) * 4, \
+ 3 + ((imm) & 0x3) * 4); })
#define _mm512_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm512_extracti32x4_epi32((A), (imm)), \
+ (__v4si)(W)); })
#define _mm512_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti32x4_mask((__v16si)(__m512i)(A), (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm512_extracti32x4_epi32((A), (imm)), \
+ (__v4si)_mm_setzero_si128()); })
-#define _mm512_extracti64x4_epi64(A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_undefined_si256(), \
- (__mmask8)-1); })
+#define _mm512_extracti64x4_epi64(A, imm) __extension__ ({ \
+ (__m256i)__builtin_shufflevector((__v8di)(__m512i)(A), \
+ (__v8di)_mm512_undefined_epi32(), \
+ ((imm) & 1) ? 4 : 0, \
+ ((imm) & 1) ? 5 : 1, \
+ ((imm) & 1) ? 6 : 2, \
+ ((imm) & 1) ? 7 : 3); })
#define _mm512_mask_extracti64x4_epi64(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm512_extracti64x4_epi64((A), (imm)), \
+ (__v4di)(W)); })
#define _mm512_maskz_extracti64x4_epi64(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_extracti64x4_mask((__v8di)(__m512i)(A), (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm512_extracti64x4_epi64((A), (imm)), \
+ (__v4di)_mm256_setzero_si256()); })
#define _mm512_insertf64x4(A, B, imm) __extension__ ({ \
- (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
- (__v4df)(__m256d)(B), (int)(imm), \
- (__v8df)_mm512_undefined_pd(), \
- (__mmask8)-1); })
+ (__m512d)__builtin_shufflevector((__v8df)(__m512d)(A), \
+ (__v8df)_mm512_castpd256_pd512((__m256d)(B)), \
+ ((imm) & 0x1) ? 0 : 8, \
+ ((imm) & 0x1) ? 1 : 9, \
+ ((imm) & 0x1) ? 2 : 10, \
+ ((imm) & 0x1) ? 3 : 11, \
+ ((imm) & 0x1) ? 8 : 4, \
+ ((imm) & 0x1) ? 9 : 5, \
+ ((imm) & 0x1) ? 10 : 6, \
+ ((imm) & 0x1) ? 11 : 7); })
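+/* B is widened to 512 bits so shuffle indices 8..11 address its low four
+ elements; bit 0 of the immediate picks which half of A they replace. */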
#define _mm512_mask_insertf64x4(W, U, A, B, imm) __extension__ ({ \
- (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
- (__v4df)(__m256d)(B), (int)(imm), \
- (__v8df)(__m512d)(W), \
- (__mmask8)(U)); })
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+ (__v8df)(W)); })
#define _mm512_maskz_insertf64x4(U, A, B, imm) __extension__ ({ \
- (__m512d)__builtin_ia32_insertf64x4_mask((__v8df)(__m512d)(A), \
- (__v4df)(__m256d)(B), (int)(imm), \
- (__v8df)_mm512_setzero_pd(), \
- (__mmask8)(U)); })
+ (__m512d)__builtin_ia32_selectpd_512((__mmask8)(U), \
+ (__v8df)_mm512_insertf64x4((A), (B), (imm)), \
+ (__v8df)_mm512_setzero_pd()); })
#define _mm512_inserti64x4(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
- (__v4di)(__m256i)(B), (int)(imm), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)-1); })
+ (__m512i)__builtin_shufflevector((__v8di)(__m512i)(A), \
+ (__v8di)_mm512_castsi256_si512((__m256i)(B)), \
+ ((imm) & 0x1) ? 0 : 8, \
+ ((imm) & 0x1) ? 1 : 9, \
+ ((imm) & 0x1) ? 2 : 10, \
+ ((imm) & 0x1) ? 3 : 11, \
+ ((imm) & 0x1) ? 8 : 4, \
+ ((imm) & 0x1) ? 9 : 5, \
+ ((imm) & 0x1) ? 10 : 6, \
+ ((imm) & 0x1) ? 11 : 7); })
#define _mm512_mask_inserti64x4(W, U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
- (__v4di)(__m256i)(B), (int)(imm), \
- (__v8di)(__m512i)(W), \
- (__mmask8)(U)); })
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+ (__v8di)(W)); })
#define _mm512_maskz_inserti64x4(U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti64x4_mask((__v8di)(__m512i)(A), \
- (__v4di)(__m256i)(B), (int)(imm), \
- (__v8di)_mm512_setzero_si512(), \
- (__mmask8)(U)); })
+ (__m512i)__builtin_ia32_selectq_512((__mmask8)(U), \
+ (__v8di)_mm512_inserti64x4((A), (B), (imm)), \
+ (__v8di)_mm512_setzero_si512()); })
#define _mm512_insertf32x4(A, B, imm) __extension__ ({ \
- (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
- (__v4sf)(__m128)(B), (int)(imm), \
- (__v16sf)_mm512_undefined_ps(), \
- (__mmask16)-1); })
+ (__m512)__builtin_shufflevector((__v16sf)(__m512)(A), \
+ (__v16sf)_mm512_castps128_ps512((__m128)(B)),\
+ (((imm) & 0x3) == 0) ? 16 : 0, \
+ (((imm) & 0x3) == 0) ? 17 : 1, \
+ (((imm) & 0x3) == 0) ? 18 : 2, \
+ (((imm) & 0x3) == 0) ? 19 : 3, \
+ (((imm) & 0x3) == 1) ? 16 : 4, \
+ (((imm) & 0x3) == 1) ? 17 : 5, \
+ (((imm) & 0x3) == 1) ? 18 : 6, \
+ (((imm) & 0x3) == 1) ? 19 : 7, \
+ (((imm) & 0x3) == 2) ? 16 : 8, \
+ (((imm) & 0x3) == 2) ? 17 : 9, \
+ (((imm) & 0x3) == 2) ? 18 : 10, \
+ (((imm) & 0x3) == 2) ? 19 : 11, \
+ (((imm) & 0x3) == 3) ? 16 : 12, \
+ (((imm) & 0x3) == 3) ? 17 : 13, \
+ (((imm) & 0x3) == 3) ? 18 : 14, \
+ (((imm) & 0x3) == 3) ? 19 : 15); })
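+/* With four candidate lanes, each index compares ((imm) & 0x3) against
+ its lane number to choose between B's elements (16..19) and A's. */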
#define _mm512_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
- (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
- (__v4sf)(__m128)(B), (int)(imm), \
- (__v16sf)(__m512)(W), \
- (__mmask16)(U)); })
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+ (__v16sf)(W)); })
#define _mm512_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
- (__m512)__builtin_ia32_insertf32x4_mask((__v16sf)(__m512)(A), \
- (__v4sf)(__m128)(B), (int)(imm), \
- (__v16sf)_mm512_setzero_ps(), \
- (__mmask16)(U)); })
+ (__m512)__builtin_ia32_selectps_512((__mmask16)(U), \
+ (__v16sf)_mm512_insertf32x4((A), (B), (imm)), \
+ (__v16sf)_mm512_setzero_ps()); })
#define _mm512_inserti32x4(A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
- (__v4si)(__m128i)(B), (int)(imm), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)-1); })
+ (__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
+ (__v16si)_mm512_castsi128_si512((__m128i)(B)),\
+ (((imm) & 0x3) == 0) ? 16 : 0, \
+ (((imm) & 0x3) == 0) ? 17 : 1, \
+ (((imm) & 0x3) == 0) ? 18 : 2, \
+ (((imm) & 0x3) == 0) ? 19 : 3, \
+ (((imm) & 0x3) == 1) ? 16 : 4, \
+ (((imm) & 0x3) == 1) ? 17 : 5, \
+ (((imm) & 0x3) == 1) ? 18 : 6, \
+ (((imm) & 0x3) == 1) ? 19 : 7, \
+ (((imm) & 0x3) == 2) ? 16 : 8, \
+ (((imm) & 0x3) == 2) ? 17 : 9, \
+ (((imm) & 0x3) == 2) ? 18 : 10, \
+ (((imm) & 0x3) == 2) ? 19 : 11, \
+ (((imm) & 0x3) == 3) ? 16 : 12, \
+ (((imm) & 0x3) == 3) ? 17 : 13, \
+ (((imm) & 0x3) == 3) ? 18 : 14, \
+ (((imm) & 0x3) == 3) ? 19 : 15); })
#define _mm512_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
- (__v4si)(__m128i)(B), (int)(imm), \
- (__v16si)(__m512i)(W), \
- (__mmask16)(U)); })
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+ (__v16si)(W)); })
#define _mm512_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
- (__m512i)__builtin_ia32_inserti32x4_mask((__v16si)(__m512i)(A), \
- (__v4si)(__m128i)(B), (int)(imm), \
- (__v16si)_mm512_setzero_si512(), \
- (__mmask16)(U)); })
+ (__m512i)__builtin_ia32_selectd_512((__mmask16)(U), \
+ (__v16si)_mm512_inserti32x4((A), (B), (imm)), \
+ (__v16si)_mm512_setzero_si512()); })
#define _mm512_getmant_round_pd(A, B, C, R) __extension__ ({ \
(__m512d)__builtin_ia32_getmantpd512_mask((__v8df)(__m512d)(A), \
@@ -8275,17 +8332,17 @@ __builtin_ia32_gatherdiv16sf ((__v8sf) __v1_old,\
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_fmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ (__v4sf) __A,
(__v4sf) __B,
- (__v4sf) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fmadd_round_ss(W, U, A, B, R) __extension__({\
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -8323,17 +8380,17 @@ _mm_mask3_fmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_fmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __A,
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ (__v4sf) __A,
-(__v4sf) __B,
- (__v4sf) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fmsub_round_ss(W, U, A, B, R) __extension__ ({\
- (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ (__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -8355,33 +8412,33 @@ _mm_maskz_fmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask3_fmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 ((__v4sf) __W,
+ return (__m128) __builtin_ia32_vfmsubss3_mask3 ((__v4sf) __W,
(__v4sf) __X,
- -(__v4sf) __Y,
+ (__v4sf) __Y,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask3_fmsub_round_ss(W, X, Y, U, R) __extension__ ({\
- (__m128)__builtin_ia32_vfmaddss3_mask3((__v4sf)(__m128)(W), \
+ (__m128)__builtin_ia32_vfmsubss3_mask3((__v4sf)(__m128)(W), \
(__v4sf)(__m128)(X), \
- -(__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_fnmadd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ -(__v4sf) __A,
(__v4sf) __B,
- (__v4sf) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fnmadd_round_ss(W, U, A, B, R) __extension__ ({\
- (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
- (__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(A), \
+ (__v4sf)(__m128)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -8419,17 +8476,17 @@ _mm_mask3_fnmadd_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_fnmsub_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask (-(__v4sf) __A,
+ return (__m128) __builtin_ia32_vfmaddss3_mask ((__v4sf) __W,
+ -(__v4sf) __A,
-(__v4sf) __B,
- (__v4sf) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fnmsub_round_ss(W, U, A, B, R) __extension__ ({\
- (__m128)__builtin_ia32_vfmaddss3_mask(-(__v4sf)(__m128)(A), \
- -(__v4sf)(__m128)(B), \
- (__v4sf)(__m128)(W), (__mmask8)(U), \
+ (__m128)__builtin_ia32_vfmaddss3_mask((__v4sf)(__m128)(W), \
+ -(__v4sf)(__m128)(A), \
+ -(__v4sf)(__m128)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -8451,33 +8508,33 @@ _mm_maskz_fnmsub_ss (__mmask8 __U, __m128 __A, __m128 __B, __m128 __C)
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask3_fnmsub_ss (__m128 __W, __m128 __X, __m128 __Y, __mmask8 __U)
{
- return (__m128) __builtin_ia32_vfmaddss3_mask3 (-(__v4sf) __W,
+ return (__m128) __builtin_ia32_vfnmsubss3_mask3 ((__v4sf) __W,
(__v4sf) __X,
- -(__v4sf) __Y,
+ (__v4sf) __Y,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask3_fnmsub_round_ss(W, X, Y, U, R) __extension__({\
- (__m128)__builtin_ia32_vfmaddss3_mask3(-(__v4sf)(__m128)(W), \
+ (__m128)__builtin_ia32_vfnmsubss3_mask3((__v4sf)(__m128)(W), \
(__v4sf)(__m128)(X), \
- -(__v4sf)(__m128)(Y), (__mmask8)(U), \
+ (__v4sf)(__m128)(Y), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_fmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
+ (__v2df) __A,
(__v2df) __B,
- (__v2df) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fmadd_round_sd(W, U, A, B, R) __extension__({\
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -8515,17 +8572,17 @@ _mm_mask3_fmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_fmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __A,
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
+ (__v2df) __A,
-(__v2df) __B,
- (__v2df) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fmsub_round_sd(W, U, A, B, R) __extension__ ({\
- (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ (__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -8547,33 +8604,33 @@ _mm_maskz_fmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask3_fmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 ((__v2df) __W,
+ return (__m128d) __builtin_ia32_vfmsubsd3_mask3 ((__v2df) __W,
(__v2df) __X,
- -(__v2df) __Y,
+ (__v2df) __Y,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask3_fmsub_round_sd(W, X, Y, U, R) __extension__ ({\
- (__m128d)__builtin_ia32_vfmaddsd3_mask3((__v2df)(__m128d)(W), \
+ (__m128d)__builtin_ia32_vfmsubsd3_mask3((__v2df)(__m128d)(W), \
(__v2df)(__m128d)(X), \
- -(__v2df)(__m128d)(Y), \
+ (__v2df)(__m128d)(Y), \
(__mmask8)(U), (int)(R)); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_fnmadd_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
+ -(__v2df) __A,
(__v2df) __B,
- (__v2df) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fnmadd_round_sd(W, U, A, B, R) __extension__ ({\
- (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
- (__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(A), \
+ (__v2df)(__m128d)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -8611,17 +8668,17 @@ _mm_mask3_fnmadd_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_fnmsub_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask ( -(__v2df) __A,
+ return (__m128d) __builtin_ia32_vfmaddsd3_mask ( (__v2df) __W,
+ -(__v2df) __A,
-(__v2df) __B,
- (__v2df) __W,
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask_fnmsub_round_sd(W, U, A, B, R) __extension__ ({\
- (__m128d)__builtin_ia32_vfmaddsd3_mask(-(__v2df)(__m128d)(A), \
- -(__v2df)(__m128d)(B), \
- (__v2df)(__m128d)(W), (__mmask8)(U), \
+ (__m128d)__builtin_ia32_vfmaddsd3_mask((__v2df)(__m128d)(W), \
+ -(__v2df)(__m128d)(A), \
+ -(__v2df)(__m128d)(B), (__mmask8)(U), \
(int)(R)); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -8644,17 +8701,17 @@ _mm_maskz_fnmsub_sd (__mmask8 __U, __m128d __A, __m128d __B, __m128d __C)
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask3_fnmsub_sd (__m128d __W, __m128d __X, __m128d __Y, __mmask8 __U)
{
- return (__m128d) __builtin_ia32_vfmaddsd3_mask3 (-(__v2df) (__W),
+ return (__m128d) __builtin_ia32_vfnmsubsd3_mask3 ((__v2df) (__W),
(__v2df) __X,
- -(__v2df) (__Y),
+ (__v2df) (__Y),
(__mmask8) __U,
_MM_FROUND_CUR_DIRECTION);
}
#define _mm_mask3_fnmsub_round_sd(W, X, Y, U, R) __extension__({\
- (__m128d)__builtin_ia32_vfmaddsd3_mask3(-(__v2df)(__m128d)(W), \
+ (__m128d)__builtin_ia32_vfnmsubsd3_mask3((__v2df)(__m128d)(W), \
(__v2df)(__m128d)(X), \
- -(__v2df)(__m128d)(Y), \
+ (__v2df)(__m128d)(Y), \
(__mmask8)(U), (int)(R)); })
#define _mm512_permutex_pd(X, C) __extension__ ({ \
@@ -9041,6 +9098,101 @@ _mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A)
(__v16sf)_mm512_setzero_ps());
}
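+// Masked scalar moves: element 0 of the result comes from __B when the low
+// mask bit is set, otherwise from __W (or zero in the maskz forms); the
+// upper elements are copied from __A.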
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B)
+{
+  __m128 __res = __A;
+  __res[0] = (__U & 1) ? __B[0] : __W[0];
+  return __res;
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B)
+{
+  __m128 __res = __A;
+  __res[0] = (__U & 1) ? __B[0] : 0;
+  return __res;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B)
+{
+  __m128d __res = __A;
+  __res[0] = (__U & 1) ? __B[0] : __W[0];
+  return __res;
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B)
+{
+  __m128d __res = __A;
+  __res[0] = (__U & 1) ? __B[0] : 0;
+  return __res;
+}
+
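+// Usage sketch (illustrative values):
+//   __m128 __r = _mm_mask_move_ss(_mm_set1_ps(9.0f), 0x1,
+//                                 _mm_set1_ps(1.0f), _mm_set1_ps(2.0f));
+//   // __r == {2.0f, 1.0f, 1.0f, 1.0f}
+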
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A)
+{
+ __builtin_ia32_storess128_mask ((__v16sf *)__W,
+ (__v16sf) _mm512_castps128_ps512(__A),
+ (__mmask16) __U & (__mmask16)1);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A)
+{
+ __builtin_ia32_storesd128_mask ((__v8df *)__W,
+ (__v8df) _mm512_castpd128_pd512(__A),
+ (__mmask8) __U & 1);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_mask_load_ss (__m128 __W, __mmask8 __U, const float* __A)
+{
+  __m128 __src = (__v4sf) __builtin_shufflevector((__v4sf) __W,
+                                                  (__v4sf) {0.0, 0.0, 0.0, 0.0},
+                                                  0, 4, 4, 4);
+
+  return (__m128) __builtin_shufflevector(
+                    __builtin_ia32_loadss128_mask ((__v16sf *) __A,
+                                      (__v16sf) _mm512_castps128_ps512(__src),
+ (__mmask16) __U & 1),
+ _mm512_undefined_ps(), 0, 1, 2, 3);
+}
+
+static __inline__ __m128 __DEFAULT_FN_ATTRS
+_mm_maskz_load_ss (__mmask8 __U, const float* __A)
+{
+ return (__m128) __builtin_shufflevector(
+ __builtin_ia32_loadss128_mask ((__v16sf *) __A,
+ (__v16sf) _mm512_setzero_ps(),
+ (__mmask16) __U & 1),
+ _mm512_undefined_ps(), 0, 1, 2, 3);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_mask_load_sd (__m128d __W, __mmask8 __U, const double* __A)
+{
+  __m128d __src = (__v2df) __builtin_shufflevector((__v2df) __W,
+                                                   (__v2df) {0.0, 0.0}, 0, 2);
+
+  return (__m128d) __builtin_shufflevector(
+                     __builtin_ia32_loadsd128_mask ((__v8df *) __A,
+                                      (__v8df) _mm512_castpd128_pd512(__src),
+ (__mmask8) __U & 1),
+ _mm512_undefined_pd(), 0, 1);
+}
+
+static __inline__ __m128d __DEFAULT_FN_ATTRS
+_mm_maskz_load_sd (__mmask8 __U, const double* __A)
+{
+ return (__m128d) __builtin_shufflevector(
+ __builtin_ia32_loadsd128_mask ((__v8df *) __A,
+ (__v8df) _mm512_setzero_pd(),
+ (__mmask8) __U & 1),
+ _mm512_undefined_pd(), 0, 1);
+}
+
#define _mm512_shuffle_epi32(A, I) __extension__ ({ \
(__m512i)__builtin_shufflevector((__v16si)(__m512i)(A), \
(__v16si)_mm512_undefined_epi32(), \
@@ -9243,6 +9395,18 @@ _mm512_maskz_cvtps_pd (__mmask8 __U, __m256 __A)
_MM_FROUND_CUR_DIRECTION);
}
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_cvtpslo_pd (__m512 __A)
+{
+  return (__m512d) _mm512_cvtps_pd(_mm512_castps512_ps256(__A));
+}
+
+static __inline__ __m512d __DEFAULT_FN_ATTRS
+_mm512_mask_cvtpslo_pd (__m512d __W, __mmask8 __U, __m512 __A)
+{
+  return (__m512d) _mm512_mask_cvtps_pd(__W, __U, _mm512_castps512_ps256(__A));
+}
+
static __inline__ __m512d __DEFAULT_FN_ATTRS
_mm512_mask_mov_pd (__m512d __W, __mmask8 __U, __m512d __A)
{
@@ -9340,14 +9504,17 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
}
#define _mm_cvtss_i32 _mm_cvtss_si32
-#define _mm_cvtss_i64 _mm_cvtss_si64
#define _mm_cvtsd_i32 _mm_cvtsd_si32
-#define _mm_cvtsd_i64 _mm_cvtsd_si64
#define _mm_cvti32_sd _mm_cvtsi32_sd
-#define _mm_cvti64_sd _mm_cvtsi64_sd
#define _mm_cvti32_ss _mm_cvtsi32_ss
+#ifdef __x86_64__
+#define _mm_cvtss_i64 _mm_cvtss_si64
+#define _mm_cvtsd_i64 _mm_cvtsd_si64
+#define _mm_cvti64_sd _mm_cvtsi64_sd
#define _mm_cvti64_ss _mm_cvtsi64_ss
+#endif
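+// The 64-bit integer conversions above go through 64-bit general-purpose
+// registers and are therefore only defined for x86_64 targets.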
+#ifdef __x86_64__
#define _mm_cvt_roundi64_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
(int)(R)); })
@@ -9355,6 +9522,7 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#define _mm_cvt_roundsi64_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtsi2sd64((__v2df)(__m128d)(A), (long long)(B), \
(int)(R)); })
+#endif
#define _mm_cvt_roundsi32_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
@@ -9362,6 +9530,7 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#define _mm_cvt_roundi32_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss32((__v4sf)(__m128)(A), (int)(B), (int)(R)); })
+#ifdef __x86_64__
#define _mm_cvt_roundsi64_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
(int)(R)); })
@@ -9369,6 +9538,7 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B)
#define _mm_cvt_roundi64_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtsi2ss64((__v4sf)(__m128)(A), (long long)(B), \
(int)(R)); })
+#endif
#define _mm_cvt_roundss_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtss2sd_round_mask((__v2df)(__m128d)(A), \
@@ -9412,6 +9582,7 @@ _mm_cvtu32_sd (__m128d __A, unsigned __B)
return (__m128d) __builtin_ia32_cvtusi2sd32 ((__v2df) __A, __B);
}
+#ifdef __x86_64__
#define _mm_cvt_roundu64_sd(A, B, R) __extension__ ({ \
(__m128d)__builtin_ia32_cvtusi2sd64((__v2df)(__m128d)(A), \
(unsigned long long)(B), (int)(R)); })
@@ -9422,6 +9593,7 @@ _mm_cvtu64_sd (__m128d __A, unsigned long long __B)
return (__m128d) __builtin_ia32_cvtusi2sd64 ((__v2df) __A, __B,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
#define _mm_cvt_roundu32_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtusi2ss32((__v4sf)(__m128)(A), (unsigned int)(B), \
@@ -9434,6 +9606,7 @@ _mm_cvtu32_ss (__m128 __A, unsigned __B)
_MM_FROUND_CUR_DIRECTION);
}
+#ifdef __x86_64__
#define _mm_cvt_roundu64_ss(A, B, R) __extension__ ({ \
(__m128)__builtin_ia32_cvtusi2ss64((__v4sf)(__m128)(A), \
(unsigned long long)(B), (int)(R)); })
@@ -9444,6 +9617,7 @@ _mm_cvtu64_ss (__m128 __A, unsigned long long __B)
return (__m128) __builtin_ia32_cvtusi2ss64 ((__v4sf) __A, __B,
_MM_FROUND_CUR_DIRECTION);
}
+#endif
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
@@ -9452,12 +9626,14 @@ _mm512_mask_set1_epi32 (__m512i __O, __mmask16 __M, int __A)
__M);
}
+#ifdef __x86_64__
static __inline__ __m512i __DEFAULT_FN_ATTRS
_mm512_mask_set1_epi64 (__m512i __O, __mmask8 __M, long long __A)
{
return (__m512i) __builtin_ia32_pbroadcastq512_gpr_mask (__A, (__v8di) __O,
__M);
}
+#endif
static __inline __m512i __DEFAULT_FN_ATTRS
_mm512_set_epi32 (int __A, int __B, int __C, int __D,
@@ -9514,27 +9690,553 @@ _mm512_set_ps (float __A, float __B, float __C, float __D,
(e4),(e3),(e2),(e1),(e0))
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_abs_ps(__m512 A)
+_mm512_abs_ps(__m512 __A)
{
- return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF),(__m512i)A) ;
+ return (__m512)_mm512_and_epi32(_mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ;
}
static __inline__ __m512 __DEFAULT_FN_ATTRS
-_mm512_mask_abs_ps(__m512 W, __mmask16 K, __m512 A)
+_mm512_mask_abs_ps(__m512 __W, __mmask16 __K, __m512 __A)
{
- return (__m512)_mm512_mask_and_epi32((__m512i)W, K, _mm512_set1_epi32(0x7FFFFFFF),(__m512i)A) ;
+ return (__m512)_mm512_mask_and_epi32((__m512i)__W, __K, _mm512_set1_epi32(0x7FFFFFFF),(__m512i)__A) ;
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_abs_pd(__m512d A)
+_mm512_abs_pd(__m512d __A)
{
- return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)A) ;
+ return (__m512d)_mm512_and_epi64(_mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A) ;
}
static __inline__ __m512d __DEFAULT_FN_ATTRS
-_mm512_mask_abs_pd(__m512d W, __mmask8 K, __m512d A)
-{
- return (__m512d)_mm512_mask_and_epi64((__v8di)W, K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)A);
+_mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
+{
+ return (__m512d)_mm512_mask_and_epi64((__v8di)__W, __K, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),(__v8di)__A);
+}
+
+// Vector-reduction arithmetic accepts vectors as inputs and produces scalars as
+// outputs. This class of vector operation forms the basis of many scientific
+// computations. In vector-reduction arithmetic, the result is independent
+// of the order of the input elements of V.
+
+// We use the bisection method: at each step, we split the vector from the
+// previous step in half and apply the operation to the two halves. This
+// takes log2(n) steps, where n is the number of elements in the vector.
+
+// Vec512   - Vector of size 512 bits.
+// Operator - Can be one of the following: +, *, &, |
+// T2       - 'i' for int, 'f' for float.
+// T1       - 'i' for int, 'd' for double.
+
+#define _mm512_reduce_operator_64bit(Vec512, Operator, T2, T1) \
+ __extension__({ \
+ __m256##T1 Vec256 = __builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+ 0, 1, 2, 3) \
+ Operator \
+ __builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+ 4, 5, 6, 7); \
+ __m128##T1 Vec128 = __builtin_shufflevector( \
+ (__v4d##T2)Vec256, \
+ (__v4d##T2)Vec256, \
+ 0, 1) \
+ Operator \
+ __builtin_shufflevector( \
+ (__v4d##T2)Vec256, \
+ (__v4d##T2)Vec256, \
+ 2, 3); \
+ Vec128 = __builtin_shufflevector((__v2d##T2)Vec128, \
+ (__v2d##T2)Vec128, 0, -1) \
+ Operator \
+ __builtin_shufflevector((__v2d##T2)Vec128, \
+ (__v2d##T2)Vec128, 1, -1); \
+ return Vec128[0]; \
+ })
+
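+// For example, with Operator '+' the expansion reduces {a0..a7} in three
+// steps: {a0+a4, a1+a5, a2+a6, a3+a7} -> {b0+b2, b1+b3} -> {c0+c1}; the
+// scalar result is element 0. The '-1' shuffle indices mark don't-care
+// lanes that are never read.
+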
+static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_add_epi64(__m512i __W) {
+ _mm512_reduce_operator_64bit(__W, +, i, i);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_mul_epi64(__m512i __W) {
+ _mm512_reduce_operator_64bit(__W, *, i, i);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_and_epi64(__m512i __W) {
+ _mm512_reduce_operator_64bit(__W, &, i, i);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_or_epi64(__m512i __W) {
+ _mm512_reduce_operator_64bit(__W, |, i, i);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_add_pd(__m512d __W) {
+ _mm512_reduce_operator_64bit(__W, +, f, d);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_mul_pd(__m512d __W) {
+ _mm512_reduce_operator_64bit(__W, *, f, d);
+}
+
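+// Usage sketch (illustrative values, assuming an AVX512F-enabled target):
+//   __m512i __v = _mm512_set1_epi64(3);
+//   long long __sum = _mm512_reduce_add_epi64(__v);  // 8 lanes of 3 -> 24
+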
+// Vec512        - Vector of size 512 bits.
+// Vec512Neutral - All vector elements set to the identity element.
+//                 Identity element: {+,0},{*,1},{&,0xFFFFFFFFFFFFFFFF},{|,0}
+// Operator      - Can be one of the following: +, *, &, |
+// Mask          - Intrinsic mask.
+// T2            - 'i' for int, 'f' for float.
+// T1            - 'i' for int, 'd' for double.
+// T3            - 'q' for q-word, 'pd' for packed double.
+
+#define _mm512_mask_reduce_operator_64bit(Vec512, Vec512Neutral, Operator, \
+ Mask, T2, T1, T3) \
+ __extension__({ \
+ Vec512 = __builtin_ia32_select##T3##_512( \
+ (__mmask8)Mask, \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512Neutral); \
+ _mm512_reduce_operator_64bit(Vec512, Operator, T2, T1); \
+ })
+
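+// With Mask == 0x0F and Operator '+', for example, lanes 4..7 are first
+// replaced by the identity element 0, so only lanes 0..3 contribute to the
+// unmasked reduction that follows.
+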
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), +, __M, i, i, q);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(1), *, __M, i, i, q);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF),
+ &, __M, i, i, q);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), |, __M,
+ i, i, q);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
+ _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(0), +, __M,
+ f, d, pd);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
+ _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(1), *, __M,
+ f, d, pd);
+}
+
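+// Usage sketch (illustrative values):
+//   __m512i __v = _mm512_set1_epi64(2);
+//   long long __p = _mm512_mask_reduce_mul_epi64(0x3, __v); // 2*2*1*..*1 == 4
+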
+// Vec512   - Vector of size 512 bits.
+// Operator - Can be one of the following: +, *, &, |
+// T2       - 'i' for int, 'f' for float.
+// T1       - 'i' for int, '' (empty) for packed single.
+
+#define _mm512_reduce_operator_32bit(Vec512, Operator, T2, T1) __extension__({ \
+ __m256##T1 Vec256 = \
+ (__m256##T1)(__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 0, 1, 2, 3, 4, 5, 6, 7) \
+ Operator \
+ __builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 8, 9, 10, 11, 12, 13, 14, 15)); \
+ __m128##T1 Vec128 = \
+ (__m128##T1)(__builtin_shufflevector( \
+ (__v8s##T2)Vec256, \
+ (__v8s##T2)Vec256, \
+ 0, 1, 2, 3) \
+ Operator \
+ __builtin_shufflevector( \
+ (__v8s##T2)Vec256, \
+ (__v8s##T2)Vec256, \
+ 4, 5, 6, 7)); \
+ Vec128 = (__m128##T1)(__builtin_shufflevector( \
+ (__v4s##T2)Vec128, \
+ (__v4s##T2)Vec128, \
+ 0, 1, -1, -1) \
+ Operator \
+ __builtin_shufflevector( \
+ (__v4s##T2)Vec128, \
+ (__v4s##T2)Vec128, \
+ 2, 3, -1, -1)); \
+ Vec128 = (__m128##T1)(__builtin_shufflevector( \
+ (__v4s##T2)Vec128, \
+ (__v4s##T2)Vec128, \
+ 0, -1, -1, -1) \
+ Operator \
+ __builtin_shufflevector( \
+ (__v4s##T2)Vec128, \
+ (__v4s##T2)Vec128, \
+ 1, -1, -1, -1)); \
+ return Vec128[0]; \
+ })
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_reduce_add_epi32(__m512i __W) {
+ _mm512_reduce_operator_32bit(__W, +, i, i);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_reduce_mul_epi32(__m512i __W) {
+ _mm512_reduce_operator_32bit(__W, *, i, i);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_reduce_and_epi32(__m512i __W) {
+ _mm512_reduce_operator_32bit(__W, &, i, i);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_reduce_or_epi32(__m512i __W) {
+ _mm512_reduce_operator_32bit(__W, |, i, i);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm512_reduce_add_ps(__m512 __W) {
+ _mm512_reduce_operator_32bit(__W, +, f, );
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm512_reduce_mul_ps(__m512 __W) {
+ _mm512_reduce_operator_32bit(__W, *, f, );
+}
+
+// Vec512        - Vector of size 512 bits.
+// Vec512Neutral - All vector elements set to the identity element.
+//                 Identity element: {+,0},{*,1},{&,0xFFFFFFFF},{|,0}
+// Operator      - Can be one of the following: +, *, &, |
+// Mask          - Intrinsic mask.
+// T2            - 'i' for int, 'f' for float.
+// T1            - 'i' for int, '' (empty) for packed single.
+// T3            - 'd' for d-word, 'ps' for packed single.
+
+#define _mm512_mask_reduce_operator_32bit(Vec512, Vec512Neutral, Operator, \
+ Mask, T2, T1, T3) \
+ __extension__({ \
+ Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \
+ (__mmask16)Mask, \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512Neutral); \
+ _mm512_reduce_operator_32bit(Vec512, Operator, T2, T1); \
+ })
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_add_epi32(__mmask16 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0), +, __M, i, i, d);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_mul_epi32(__mmask16 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(1), *, __M, i, i, d);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_and_epi32(__mmask16 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0xFFFFFFFF), &, __M,
+ i, i, d);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
+ _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0), |, __M, i, i, d);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
+ _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_ps(0), +, __M, f, , ps);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) {
+ _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_ps(1), *, __M, f, , ps);
+}
+
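+// Usage sketch (illustrative values):
+//   __m512 __v = _mm512_set1_ps(1.5f);
+//   float __s = _mm512_mask_reduce_add_ps((__mmask16)-1, __v); // 16*1.5f == 24.0f
+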
+// We use the bisection method: at each step, we split the vector from the
+// previous step in half and apply the operation to the two halves. This
+// takes log2(n) steps, where n is the number of elements in the vector.
+// This macro uses only intrinsics from the AVX512F feature.
+
+// Vec512     - Vector of size 512 bits.
+// IntrinName - One of {max|min}_{epi64|epu64|pd}, e.g. _mm512_max_epi64.
+// T1         - 'i' for int, 'd' for double. [__m512{i|d}]
+// T2         - 'i' for int, 'f' for float.  [__v8d{i|f}]
+
+#define _mm512_reduce_maxMin_64bit(Vec512, IntrinName, T1, T2) __extension__({ \
+ Vec512 = _mm512_##IntrinName( \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+ 0, 1, 2, 3, -1, -1, -1, -1), \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+ 4, 5, 6, 7, -1, -1, -1, -1)); \
+ Vec512 = _mm512_##IntrinName( \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+ 0, 1, -1, -1, -1, -1, -1, -1),\
+ (__m512##T1)__builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+                                        2, 3, -1, -1, -1, -1, -1, -1)); \
+ Vec512 = _mm512_##IntrinName( \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+ 0, -1, -1, -1, -1, -1, -1, -1),\
+ (__m512##T1)__builtin_shufflevector( \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512, \
+                                        1, -1, -1, -1, -1, -1, -1, -1)); \
+ return Vec512[0]; \
+ })
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_reduce_max_epi64(__m512i __V) {
+ _mm512_reduce_maxMin_64bit(__V, max_epi64, i, i);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm512_reduce_max_epu64(__m512i __V) {
+ _mm512_reduce_maxMin_64bit(__V, max_epu64, i, i);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm512_reduce_max_pd(__m512d __V) {
+ _mm512_reduce_maxMin_64bit(__V, max_pd, d, f);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_reduce_min_epi64(__m512i __V) {
+ _mm512_reduce_maxMin_64bit(__V, min_epi64, i, i);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm512_reduce_min_epu64(__m512i __V) {
+ _mm512_reduce_maxMin_64bit(__V, min_epu64, i, i);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm512_reduce_min_pd(__m512d __V) {
+ _mm512_reduce_maxMin_64bit(__V, min_pd, d, f);
+}
+
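+// Usage sketch (illustrative values):
+//   __m512d __v = _mm512_set_pd(8.0, 7.0, 6.0, 5.0, 4.0, 3.0, 2.0, 1.0);
+//   double __mx = _mm512_reduce_max_pd(__v);  // 8.0
+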
+// Vec512        - Vector of size 512 bits.
+// Vec512Neutral - A 512-bit vector with all elements set to the identity
+//                 element. Identity element: {max_epi,0x8000000000000000}
+//                                            {max_epu,0x0000000000000000}
+//                                            {max_pd, 0xFFF0000000000000}
+//                                            {min_epi,0x7FFFFFFFFFFFFFFF}
+//                                            {min_epu,0xFFFFFFFFFFFFFFFF}
+//                                            {min_pd, 0x7FF0000000000000}
+//
+// IntrinName - One of {max|min}_{epi64|epu64|pd}, e.g. _mm512_max_epi64.
+// T1         - 'i' for int, 'd' for double. [__m512{i|d}]
+// T2         - 'i' for int, 'f' for float.  [__v8d{i|f}]
+// T3         - 'q' for q-word, 'pd' for packed double.
+//              [__builtin_ia32_select{q|pd}_512]
+// Mask       - Intrinsic mask.
+
+#define _mm512_mask_reduce_maxMin_64bit(Vec512, Vec512Neutral, IntrinName, T1, \
+ T2, T3, Mask) \
+ __extension__({ \
+ Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \
+ (__mmask8)Mask, \
+ (__v8d##T2)Vec512, \
+ (__v8d##T2)Vec512Neutral); \
+ _mm512_reduce_maxMin_64bit(Vec512, IntrinName, T1, T2); \
+ })
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_max_epi64(__mmask8 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x8000000000000000),
+ max_epi64, i, i, q, __M);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_max_epu64(__mmask8 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x0000000000000000),
+ max_epu64, i, i, q, __M);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
+ _mm512_mask_reduce_maxMin_64bit(__V, -_mm512_set1_pd(__builtin_inf()),
+ max_pd, d, f, pd, __M);
+}
+
+static __inline__ long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0x7FFFFFFFFFFFFFFF),
+ min_epi64, i, i, q, __M);
+}
+
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF),
+ min_epu64, i, i, q, __M);
+}
+
+static __inline__ double __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
+ _mm512_mask_reduce_maxMin_64bit(__V, _mm512_set1_pd(__builtin_inf()),
+ min_pd, d, f, pd, __M);
+}
+
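+// Note: the +/-infinity neutrals above are formed by (optionally negated)
+// _mm512_set1_pd(__builtin_inf()), matching the identity table in the
+// comment block.
+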
+// Vec512     - Vector of size 512 bits.
+// IntrinName - One of {max|min}_{epi32|epu32|ps}, e.g. _mm512_max_epi32.
+// T1         - 'i' for int, '' (empty) for packed single. [__m512{i|}]
+// T2         - 'i' for int, 'f' for float. [__v16s{i|f}]
+
+#define _mm512_reduce_maxMin_32bit(Vec512, IntrinName, T1, T2) __extension__({ \
+ Vec512 = _mm512_##IntrinName( \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 0, 1, 2, 3, 4, 5, 6, 7, \
+ -1, -1, -1, -1, -1, -1, -1, -1), \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 8, 9, 10, 11, 12, 13, 14, 15, \
+ -1, -1, -1, -1, -1, -1, -1, -1)); \
+ Vec512 = _mm512_##IntrinName( \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 0, 1, 2, 3, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1), \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 4, 5, 6, 7, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1)); \
+ Vec512 = _mm512_##IntrinName( \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 0, 1, -1, -1, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1), \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 2, 3, -1, -1, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1)); \
+ Vec512 = _mm512_##IntrinName( \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 0, -1, -1, -1, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1), \
+ (__m512##T1)__builtin_shufflevector( \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512, \
+ 1, -1, -1, -1, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1)); \
+ return Vec512[0]; \
+ })
+
+static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_max_epi32(__m512i __V) {
+  _mm512_reduce_maxMin_32bit(__V, max_epi32, i, i);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm512_reduce_max_epu32(__m512i __V) {
+  _mm512_reduce_maxMin_32bit(__V, max_epu32, i, i);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS _mm512_reduce_max_ps(__m512 __V) {
+  _mm512_reduce_maxMin_32bit(__V, max_ps, , f);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_min_epi32(__m512i __V) {
+  _mm512_reduce_maxMin_32bit(__V, min_epi32, i, i);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm512_reduce_min_epu32(__m512i __V) {
+  _mm512_reduce_maxMin_32bit(__V, min_epu32, i, i);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS _mm512_reduce_min_ps(__m512 __V) {
+  _mm512_reduce_maxMin_32bit(__V, min_ps, , f);
+}
+
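+// Usage sketch (illustrative values):
+//   __m512i __v = _mm512_set1_epi32(-7);
+//   int __mn = _mm512_reduce_min_epi32(__v);           // -7
+//   unsigned __mu = _mm512_reduce_max_epu32(__v);      // 0xFFFFFFF9
+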
+// Vec512        - Vector of size 512 bits.
+// Vec512Neutral - A 512-bit vector with all elements set to the identity
+//                 element. Identity element: {max_epi,0x80000000}
+//                                            {max_epu,0x00000000}
+//                                            {max_ps, 0xFF800000}
+//                                            {min_epi,0x7FFFFFFF}
+//                                            {min_epu,0xFFFFFFFF}
+//                                            {min_ps, 0x7F800000}
+//
+// IntrinName - One of {max|min}_{epi32|epu32|ps}, e.g. _mm512_max_epi32.
+// T1         - 'i' for int, '' (empty) for packed single. [__m512{i|}]
+// T2         - 'i' for int, 'f' for float. [__v16s{i|f}]
+// T3         - 'd' for d-word, 'ps' for packed single.
+//              [__builtin_ia32_select{d|ps}_512]
+// Mask       - Intrinsic mask.
+
+#define _mm512_mask_reduce_maxMin_32bit(Vec512, Vec512Neutral, IntrinName, T1, \
+ T2, T3, Mask) \
+ __extension__({ \
+ Vec512 = (__m512##T1)__builtin_ia32_select##T3##_512( \
+ (__mmask16)Mask, \
+ (__v16s##T2)Vec512, \
+ (__v16s##T2)Vec512Neutral); \
+ _mm512_reduce_maxMin_32bit(Vec512, IntrinName, T1, T2); \
+ })
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_max_epi32(__mmask16 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x80000000), max_epi32,
+ i, i, d, __M);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_max_epu32(__mmask16 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x00000000), max_epu32,
+ i, i, d, __M);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
+  _mm512_mask_reduce_maxMin_32bit(__V, -_mm512_set1_ps(__builtin_inff()),
+                                  max_ps, , f, ps, __M);
+}
+
+static __inline__ int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0x7FFFFFFF), min_epi32,
+ i, i, d, __M);
+}
+
+static __inline__ unsigned int __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
+ _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_epi32(0xFFFFFFFF), min_epu32,
+ i, i, d, __M);
+}
+
+static __inline__ float __DEFAULT_FN_ATTRS
+_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
+  _mm512_mask_reduce_maxMin_32bit(__V, _mm512_set1_ps(__builtin_inff()),
+                                  min_ps, , f, ps, __M);
}
#undef __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/avx512vlbwintrin.h b/lib/Headers/avx512vlbwintrin.h
index 990e992a113f..3b58d043395a 100644
--- a/lib/Headers/avx512vlbwintrin.h
+++ b/lib/Headers/avx512vlbwintrin.h
@@ -615,172 +615,143 @@ _mm256_mask_cmpneq_epu16_mask(__mmask16 __u, __m256i __a, __m256i __b) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_add_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){
- return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+_mm256_mask_add_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B){
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_add_epi8(__A, __B),
+ (__v32qi)__W);
}
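+// Note: expressing these through the generic select builtins lets the
+// backend match the unmasked operation plus blend and fold them into a
+// single masked instruction.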
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_add_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_paddb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi)
- _mm256_setzero_si256 (),
- (__mmask32) __U);
+_mm256_maskz_add_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_add_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_add_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_add_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_add_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_add_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_paddw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+_mm256_maskz_add_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_add_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sub_epi8 (__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+_mm256_mask_sub_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_sub_epi8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_epi8 (__mmask32 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_psubb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi)
- _mm256_setzero_si256 (),
- (__mmask32) __U);
+_mm256_maskz_sub_epi8(__mmask32 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_sub_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sub_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_sub_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sub_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_psubw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+_mm256_maskz_sub_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sub_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_add_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+_mm_mask_add_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_add_epi8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_add_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_paddb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi)
- _mm_setzero_si128 (),
- (__mmask16) __U);
+_mm_maskz_add_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_add_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_add_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_add_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_add_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_add_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_paddw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+_mm_maskz_add_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_add_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sub_epi8 (__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+_mm_mask_sub_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_sub_epi8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sub_epi8 (__mmask16 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_psubb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi)
- _mm_setzero_si128 (),
- (__mmask16) __U);
+_mm_maskz_sub_epi8(__mmask16 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_sub_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sub_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_sub_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sub_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sub_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_psubw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+_mm_maskz_sub_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sub_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mullo_epi16 (__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_mullo_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mullo_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mullo_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmullw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+_mm256_maskz_mullo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mullo_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mullo_epi16 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_mullo_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mullo_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mullo_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmullw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+_mm_maskz_mullo_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mullo_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -816,937 +787,802 @@ _mm256_mask_blend_epi16 (__mmask16 __U, __m256i __A, __m256i __W)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_abs_epi8 (__m128i __W, __mmask16 __U, __m128i __A)
+_mm_mask_abs_epi8(__m128i __W, __mmask16 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_abs_epi8(__A),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_abs_epi8 (__mmask16 __U, __m128i __A)
+_mm_maskz_abs_epi8(__mmask16 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pabsb128_mask ((__v16qi) __A,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_abs_epi8(__A),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_abs_epi8 (__m256i __W, __mmask32 __U, __m256i __A)
+_mm256_mask_abs_epi8(__m256i __W, __mmask32 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_abs_epi8(__A),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_abs_epi8 (__mmask32 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_pabsb256_mask ((__v32qi) __A,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_abs_epi8(__A),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_abs_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_abs_epi16(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_abs_epi16(__A),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_abs_epi16 (__mmask8 __U, __m128i __A)
+_mm_maskz_abs_epi16(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pabsw128_mask ((__v8hi) __A,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_abs_epi16(__A),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_abs_epi16 (__m256i __W, __mmask16 __U, __m256i __A)
+_mm256_mask_abs_epi16(__m256i __W, __mmask16 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_abs_epi16(__A),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_abs_epi16 (__mmask16 __U, __m256i __A)
+_mm256_maskz_abs_epi16(__mmask16 __U, __m256i __A)
{
- return (__m256i) __builtin_ia32_pabsw256_mask ((__v16hi) __A,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_abs_epi16(__A),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_packs_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
-{
- return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v8hi) _mm_setzero_si128 (), __M);
+_mm_maskz_packs_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_packs_epi32(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_packs_epi32 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_packs_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_packssdw128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v8hi) __W, __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_packs_epi32(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_packs_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+_mm256_maskz_packs_epi32(__mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v16hi) _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_packs_epi32(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_packs_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_packs_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packssdw256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v16hi) __W, __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_packs_epi32(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_packs_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+_mm_maskz_packs_epi16(__mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v16qi) _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_packs_epi16(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_packs_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_packs_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_packsswb128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v16qi) __W,
- __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_packs_epi16(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_packs_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+_mm256_maskz_packs_epi16(__mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_packs_epi16(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_packs_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_packs_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packsswb256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v32qi) __W,
- __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_packs_epi16(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_packus_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+_mm_maskz_packus_epi32(__mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v8hi) _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_packus_epi32(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_packus_epi32 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_packus_epi32(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_packusdw128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v8hi) __W, __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_packus_epi32(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_packus_epi32 (__mmask16 __M, __m256i __A, __m256i __B)
+_mm256_maskz_packus_epi32(__mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v16hi) _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_packus_epi32(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_packus_epi32 (__m256i __W, __mmask16 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_packus_epi32(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packusdw256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v16hi) __W,
- __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_packus_epi32(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_packus_epi16 (__mmask16 __M, __m128i __A, __m128i __B)
+_mm_maskz_packus_epi16(__mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v16qi) _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_packus_epi16(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_packus_epi16 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_packus_epi16(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_packuswb128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v16qi) __W,
- __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_packus_epi16(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_packus_epi16 (__mmask32 __M, __m256i __A, __m256i __B)
+_mm256_maskz_packus_epi16(__mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_packus_epi16(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_packus_epi16 (__m256i __W, __mmask32 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_packus_epi16(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_packuswb256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v32qi) __W,
- __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_packus_epi16(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_adds_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
- __m128i __B)
+_mm_mask_adds_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_adds_epi8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_adds_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+_mm_maskz_adds_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_adds_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_adds_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_adds_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_adds_epi8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_adds_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+_mm256_maskz_adds_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_adds_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_adds_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_adds_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_adds_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_adds_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_adds_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_adds_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_adds_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_adds_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_adds_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_adds_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_adds_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_adds_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_adds_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
- __m128i __B)
+_mm_mask_adds_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_adds_epu8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_adds_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+_mm_maskz_adds_epu8(__mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddusb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_adds_epu8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_adds_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_adds_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_adds_epu8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_adds_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+_mm256_maskz_adds_epu8(__mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddusb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_adds_epu8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_adds_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_adds_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_adds_epu16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_adds_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_adds_epu16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddusw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_adds_epu16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_adds_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_adds_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_adds_epu16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_adds_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_adds_epu16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddusw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_adds_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_avg_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
- __m128i __B)
+_mm_mask_avg_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_avg_epu8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_avg_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+_mm_maskz_avg_epu8(__mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pavgb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_avg_epu8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_avg_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_avg_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_avg_epu8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_avg_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+_mm256_maskz_avg_epu8(__mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pavgb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_avg_epu8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_avg_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_avg_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_avg_epu16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_avg_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_avg_epu16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pavgw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_avg_epu16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_avg_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_avg_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_avg_epu16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_avg_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_avg_epu16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pavgw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_avg_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
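
The avg rewrites follow the same shape; in use, the only difference between the two flavors is the fallback for masked-off lanes. A hypothetical caller (the function names and mask value are illustrative, and the snippet assumes a build with -mavx512vl -mavx512bw):

    #include <immintrin.h>

    /* Merge form: unselected lanes keep their previous contents. */
    __m128i avg_merge(__m128i prev, __m128i a, __m128i b) {
      return _mm_mask_avg_epu8(prev, (__mmask16)0xFF00, a, b);
    }
    /* Zeroing form: unselected lanes become zero. */
    __m128i avg_zero(__m128i a, __m128i b) {
      return _mm_maskz_avg_epu8((__mmask16)0xFF00, a, b);
    }
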
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+_mm_maskz_max_epi8(__mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_max_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_max_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_max_epi8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_max_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+_mm256_maskz_max_epi8(__mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_max_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_max_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_max_epi8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+_mm_maskz_max_epi16(__mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_max_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B)
+_mm_mask_max_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_max_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_max_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+_mm256_maskz_max_epi16(__mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_max_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_max_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_max_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+_mm_maskz_max_epu8(__mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_max_epu8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_max_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxub128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_max_epu8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_max_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_max_epu8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_max_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxub256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_max_epu8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+_mm_maskz_max_epu16(__mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_max_epu16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B)
+_mm_mask_max_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmaxuw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_max_epu16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_max_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+_mm256_maskz_max_epu16(__mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_max_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_max_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmaxuw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_max_epu16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_min_epi8 (__mmask16 __M, __m128i __A, __m128i __B)
+_mm_maskz_min_epi8(__mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_min_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epi8 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_min_epi8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_min_epi8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_min_epi8 (__mmask32 __M, __m256i __A, __m256i __B)
+_mm256_maskz_min_epi8(__mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_min_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epi8 (__m256i __W, __mmask32 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_min_epi8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_min_epi8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_min_epi16 (__mmask8 __M, __m128i __A, __m128i __B)
+_mm_maskz_min_epi16(__mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_min_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epi16 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B)
+_mm_mask_min_epi16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_min_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_min_epi16 (__mmask16 __M, __m256i __A, __m256i __B)
+_mm256_maskz_min_epi16(__mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_min_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_min_epi16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_min_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_min_epu8 (__mmask16 __M, __m128i __A, __m128i __B)
+_mm_maskz_min_epu8(__mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_min_epu8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epu8 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_min_epu8(__m128i __W, __mmask16 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminub128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __M);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__M,
+ (__v16qi)_mm_min_epu8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_min_epu8 (__mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_min_epu8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epu8 (__m256i __W, __mmask32 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_min_epu8(__m256i __W, __mmask32 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminub256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __M);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__M,
+ (__v32qi)_mm256_min_epu8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_min_epu16 (__mmask8 __M, __m128i __A, __m128i __B)
+_mm_maskz_min_epu16(__mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_min_epu16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epu16 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B)
+_mm_mask_min_epu16(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pminuw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __M);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__M,
+ (__v8hi)_mm_min_epu16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_min_epu16 (__mmask16 __M, __m256i __A, __m256i __B)
+_mm256_maskz_min_epu16(__mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_min_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epu16 (__m256i __W, __mmask16 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_min_epu16(__m256i __W, __mmask16 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pminuw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __M);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__M,
+ (__v16hi)_mm256_min_epu16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_shuffle_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
- __m128i __B)
+_mm_mask_shuffle_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_shuffle_epi8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_shuffle_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+_mm_maskz_shuffle_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pshufb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_shuffle_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_shuffle_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_shuffle_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_shuffle_epi8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_shuffle_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+_mm256_maskz_shuffle_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pshufb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_shuffle_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
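
Masked shuffles compose the same way: the shuffle runs on all lanes, and the select then decides per byte whether to keep the shuffled value. A hypothetical use that splices shuffled bytes into an existing vector (names and mask are illustrative; assumes -mavx512vl -mavx512bw):

    #include <immintrin.h>

    __m128i splice_shuffled(__m128i prev, __m128i v, __m128i idx) {
      const __mmask16 low8 = 0x00FF;   /* take the low eight lanes */
      return _mm_mask_shuffle_epi8(prev, low8, v, idx);
    }
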
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_subs_epi8 (__m128i __W, __mmask16 __U, __m128i __A,
- __m128i __B)
+_mm_mask_subs_epi8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_subs_epi8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_subs_epi8 (__mmask16 __U, __m128i __A, __m128i __B)
+_mm_maskz_subs_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubsb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_subs_epi8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_subs_epi8 (__m256i __W, __mmask32 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_subs_epi8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_subs_epi8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_subs_epi8 (__mmask32 __U, __m256i __A, __m256i __B)
+_mm256_maskz_subs_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubsb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_subs_epi8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_subs_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_subs_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_subs_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_subs_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_subs_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubsw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_subs_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_subs_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_subs_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_subs_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_subs_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_subs_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubsw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_subs_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_subs_epu8 (__m128i __W, __mmask16 __U, __m128i __A,
- __m128i __B)
+_mm_mask_subs_epu8(__m128i __W, __mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) __W,
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_subs_epu8(__A, __B),
+ (__v16qi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_subs_epu8 (__mmask16 __U, __m128i __A, __m128i __B)
+_mm_maskz_subs_epu8(__mmask16 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubusb128_mask ((__v16qi) __A,
- (__v16qi) __B,
- (__v16qi) _mm_setzero_si128 (),
- (__mmask16) __U);
+ return (__m128i)__builtin_ia32_selectb_128((__mmask16)__U,
+ (__v16qi)_mm_subs_epu8(__A, __B),
+ (__v16qi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_subs_epu8 (__m256i __W, __mmask32 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_subs_epu8(__m256i __W, __mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) __W,
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_subs_epu8(__A, __B),
+ (__v32qi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_subs_epu8 (__mmask32 __U, __m256i __A, __m256i __B)
+_mm256_maskz_subs_epu8(__mmask32 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubusb256_mask ((__v32qi) __A,
- (__v32qi) __B,
- (__v32qi) _mm256_setzero_si256 (),
- (__mmask32) __U);
+ return (__m256i)__builtin_ia32_selectb_256((__mmask32)__U,
+ (__v32qi)_mm256_subs_epu8(__A, __B),
+ (__v32qi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_subs_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_subs_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_subs_epu16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_subs_epu16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_subs_epu16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubusw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_subs_epu16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_subs_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
-{
- return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_subs_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
+{
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_subs_epu16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_subs_epu16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_subs_epu16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubusw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_subs_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -1828,69 +1664,60 @@ _mm256_maskz_permutex2var_epi16 (__mmask16 __U, __m256i __A,
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_maddubs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
- (__v16qi) __Y,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_maddubs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_maddubs_epi16(__X, __Y),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_maddubs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
- return (__m128i) __builtin_ia32_pmaddubsw128_mask ((__v16qi) __X,
- (__v16qi) __Y,
- (__v8hi) _mm_setzero_si128(),
- (__mmask8) __U);
+_mm_maskz_maddubs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_maddubs_epi16(__X, __Y),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_maddubs_epi16 (__m256i __W, __mmask16 __U, __m256i __X,
- __m256i __Y) {
- return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
- (__v32qi) __Y,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_maddubs_epi16(__m256i __W, __mmask16 __U, __m256i __X,
+ __m256i __Y) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_maddubs_epi16(__X, __Y),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_maddubs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
- return (__m256i) __builtin_ia32_pmaddubsw256_mask ((__v32qi) __X,
- (__v32qi) __Y,
- (__v16hi) _mm256_setzero_si256(),
- (__mmask16) __U);
+_mm256_maskz_maddubs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_maddubs_epi16(__X, __Y),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_madd_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v4si) __W,
- (__mmask8) __U);
+_mm_mask_madd_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_madd_epi16(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_madd_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmaddwd128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v4si) _mm_setzero_si128(),
- (__mmask8) __U);
+_mm_maskz_madd_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_madd_epi16(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_madd_epi16 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v8si) __W,
- (__mmask8) __U);
+_mm256_mask_madd_epi16(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_madd_epi16(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_madd_epi16 (__mmask8 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaddwd256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v8si) _mm256_setzero_si256(),
- (__mmask8) __U);
+_mm256_maskz_madd_epi16(__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_madd_epi16(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
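
Note the lane-width change in the madd group: pmaddwd consumes pairs of 16-bit elements and produces 32-bit sums, so the select switches to the dword variants (selectd_128/selectd_256 over __v4si/__v8si) and the mask covers four or eight lanes rather than eight or sixteen. A scalar model of one output lane:

    /* Each 32-bit output lane of _mm_madd_epi16 is the sum of two
       adjacent 16-bit products, so masking is per dword. */
    static inline int madd_lane(const short a[2], const short b[2]) {
      return (int)a[0] * (int)b[0] + (int)a[1] * (int)b[1];
    }
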
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -2056,104 +1883,89 @@ _mm256_mask_cvtusepi16_storeu_epi8 (void * __P, __mmask8 __M, __m256i __A)
{
__builtin_ia32_pmovuswb256mem_mask ((__v16qi*) __P, (__v16hi) __A, __M);
}
+
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mulhrs_epi16 (__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
- return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
- (__v8hi) __Y,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_mulhrs_epi16(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mulhrs_epi16(__X, __Y),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mulhrs_epi16 (__mmask8 __U, __m128i __X, __m128i __Y) {
- return (__m128i) __builtin_ia32_pmulhrsw128_mask ((__v8hi) __X,
- (__v8hi) __Y,
- (__v8hi) _mm_setzero_si128(),
- (__mmask8) __U);
+_mm_maskz_mulhrs_epi16(__mmask8 __U, __m128i __X, __m128i __Y) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mulhrs_epi16(__X, __Y),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mulhrs_epi16 (__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
- return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
- (__v16hi) __Y,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_mulhrs_epi16(__m256i __W, __mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mulhrs_epi16(__X, __Y),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mulhrs_epi16 (__mmask16 __U, __m256i __X, __m256i __Y) {
- return (__m256i) __builtin_ia32_pmulhrsw256_mask ((__v16hi) __X,
- (__v16hi) __Y,
- (__v16hi) _mm256_setzero_si256(),
- (__mmask16) __U);
+_mm256_maskz_mulhrs_epi16(__mmask16 __U, __m256i __X, __m256i __Y) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mulhrs_epi16(__X, __Y),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mulhi_epu16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_mulhi_epu16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mulhi_epu16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mulhi_epu16 (__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmulhuw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128(),
- (__mmask8) __U);
+_mm_maskz_mulhi_epu16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mulhi_epu16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mulhi_epu16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_mulhi_epu16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mulhi_epu16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mulhi_epu16 (__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmulhuw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256(),
- (__mmask16) __U);
+_mm256_maskz_mulhi_epu16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mulhi_epu16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mulhi_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+_mm_mask_mulhi_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mulhi_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mulhi_epi16 (__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmulhw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) _mm_setzero_si128(),
- (__mmask8) __U);
+_mm_maskz_mulhi_epi16(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_mulhi_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mulhi_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+_mm256_mask_mulhi_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mulhi_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mulhi_epi16 (__mmask16 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmulhw256_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) _mm256_setzero_si256(),
- (__mmask16) __U);
+_mm256_maskz_mulhi_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_mulhi_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -2269,72 +2081,68 @@ _mm256_maskz_unpacklo_epi16(__mmask16 __U, __m256i __A, __m256i __B) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi8_epi16 (__m128i __W, __mmask32 __U, __m128i __A)
+_mm_mask_cvtepi8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_cvtepi8_epi16(__A),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi8_epi16 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepi8_epi16(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxbw128_mask ((__v16qi) __A,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_cvtepi8_epi16(__A),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi8_epi16 (__m256i __W, __mmask32 __U, __m128i __A)
+_mm256_mask_cvtepi8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_cvtepi8_epi16(__A),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi8_epi16 (__mmask16 __U, __m128i __A)
+_mm256_maskz_cvtepi8_epi16(__mmask16 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxbw256_mask ((__v16qi) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_cvtepi8_epi16(__A),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu8_epi16 (__m128i __W, __mmask32 __U, __m128i __A)
+_mm_mask_cvtepu8_epi16(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_cvtepu8_epi16(__A),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu8_epi16 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepu8_epi16(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxbw128_mask ((__v16qi) __A,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_cvtepu8_epi16(__A),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu8_epi16 (__m256i __W, __mmask32 __U, __m128i __A)
+_mm256_mask_cvtepu8_epi16(__m256i __W, __mmask16 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_cvtepu8_epi16(__A),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxbw256_mask ((__v16qi) __A,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_cvtepu8_epi16(__A),
+ (__v16hi)_mm256_setzero_si256());
}
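
Beyond the mechanical rewrite, these cvt hunks also correct over-wide mask parameters: the 128-bit extensions produce eight 16-bit lanes, so __mmask8 (not __mmask32) is the right type, and the 256-bit forms take __mmask16. A hypothetical caller of the corrected signature (assumes -mavx512vl -mavx512bw):

    #include <immintrin.h>

    /* Sign-extend the low bytes, zeroing all but the first four lanes. */
    __m128i widen_low4(__m128i bytes) {
      return _mm_maskz_cvtepi8_epi16((__mmask8)0x0F, bytes);
    }
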
@@ -2461,366 +2269,328 @@ _mm256_maskz_cvtepu8_epi16 (__mmask16 __U, __m128i __A)
(__v16hi)_mm256_setzero_si256()); })
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_sllv_epi16 (__m256i __A, __m256i __B)
+_mm256_sllv_epi16(__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) -1);
+ return (__m256i)__builtin_ia32_psllv16hi((__v16hi)__A, (__v16hi)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sllv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_sllv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sllv_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sllv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_sllv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psllv16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sllv_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sllv_epi16 (__m128i __A, __m128i __B)
+_mm_sllv_epi16(__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_hi (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_psllv8hi((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sllv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sllv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sllv_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sllv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sllv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psllv8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sllv_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sll_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sll_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sll_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maskz_sll_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psllw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sll_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sll_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_sll_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
- (__v8hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sll_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sll_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+_mm256_maskz_sll_epi16(__mmask16 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psllw256_mask ((__v16hi) __A,
- (__v8hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sll_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
-#define _mm_mask_slli_epi16(W, U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_psllwi128_mask((__v8hi)(__m128i)(A), (int)(B), \
- (__v8hi)(__m128i)(W), \
- (__mmask8)(U)); })
-
-#define _mm_maskz_slli_epi16(U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_psllwi128_mask((__v8hi)(__m128i)(A), (int)(B), \
- (__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U)); })
-
-#define _mm256_mask_slli_epi16(W, U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_psllwi256_mask((__v16hi)(__m256i)(A), (int)(B), \
- (__v16hi)(__m256i)(W), \
- (__mmask16)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_slli_epi16(__A, __B),
+ (__v8hi)__W);
+}
-#define _mm256_maskz_slli_epi16(U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_psllwi256_mask((__v16hi)(__m256i)(A), (int)(B), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_slli_epi16(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_slli_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
+}
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_slli_epi16(__A, __B),
+ (__v16hi)__W);
+}
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_slli_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
+}
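
Converting the slli macros into always-inline functions works because, unlike the retired psllwi mask builtins, the underlying _mm_slli_epi16/_mm256_slli_epi16 accept a non-constant count; the shift amount no longer has to be an immediate. A sketch (illustrative names; assumes -mavx512vl -mavx512bw):

    #include <immintrin.h>

    /* The count may now be a runtime value. */
    __m128i shift_live(__m128i v, __mmask8 live, int n) {
      return _mm_maskz_slli_epi16(live, v, n);
    }
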
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_srlv_epi16 (__m256i __A, __m256i __B)
+_mm256_srlv_epi16(__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) -1);
+ return (__m256i)__builtin_ia32_psrlv16hi((__v16hi)__A, (__v16hi)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srlv_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_srlv_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srlv_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srlv_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_srlv_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psrlv16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srlv_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srlv_epi16 (__m128i __A, __m128i __B)
+_mm_srlv_epi16(__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_hi (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_psrlv8hi((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srlv_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_srlv_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srlv_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srlv_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_srlv_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrlv8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srlv_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_srav_epi16 (__m256i __A, __m256i __B)
+_mm256_srav_epi16(__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) -1);
+ return (__m256i)__builtin_ia32_psrav16hi((__v16hi)__A, (__v16hi)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srav_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_srav_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srav_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srav_epi16 (__mmask16 __U, __m256i __A, __m256i __B)
+_mm256_maskz_srav_epi16(__mmask16 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psrav16hi_mask ((__v16hi) __A,
- (__v16hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srav_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srav_epi16 (__m128i __A, __m128i __B)
+_mm_srav_epi16(__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_hi (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_psrav8hi((__v8hi)__A, (__v8hi)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srav_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_srav_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srav_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srav_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_srav_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrav8hi_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srav_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sra_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sra_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sra_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sra_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sra_epi16(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psraw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_sra_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sra_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_sra_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
- (__v8hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sra_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sra_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+_mm256_maskz_sra_epi16(__mmask16 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psraw256_mask ((__v16hi) __A,
- (__v8hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_sra_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
-#define _mm_mask_srai_epi16(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrawi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
- (__v8hi)(__m128i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srai_epi16(__A, __B),
+ (__v8hi)__W);
+}
-#define _mm_maskz_srai_epi16(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrawi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
- (__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srai_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
+}
-#define _mm256_mask_srai_epi16(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrawi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
- (__v16hi)(__m256i)(W), \
- (__mmask16)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srai_epi16(__A, __B),
+ (__v16hi)__W);
+}
-#define _mm256_maskz_srai_epi16(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrawi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srai_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
+}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srl_epi16 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_srl_epi16(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srl_epi16(__A, __B),
+ (__v8hi)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maskz_srl_epi16 (__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrlw128_mask ((__v8hi) __A,
- (__v8hi) __B,
- (__v8hi)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srl_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srl_epi16 (__m256i __W, __mmask16 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_srl_epi16(__m256i __W, __mmask16 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
- (__v8hi) __B,
- (__v16hi) __W,
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srl_epi16(__A, __B),
+ (__v16hi)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srl_epi16 (__mmask16 __U, __m256i __A, __m128i __B)
+_mm256_maskz_srl_epi16(__mmask16 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrlw256_mask ((__v16hi) __A,
- (__v8hi) __B,
- (__v16hi)
- _mm256_setzero_si256 (),
- (__mmask16) __U);
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srl_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
}
-#define _mm_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
- (__v8hi)(__m128i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srli_epi16(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srli_epi16(__A, __B),
+ (__v8hi)__W);
+}
-#define _mm_maskz_srli_epi16(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrlwi128_mask((__v8hi)(__m128i)(A), (int)(imm), \
- (__v8hi)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srli_epi16(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
+ (__v8hi)_mm_srli_epi16(__A, __B),
+ (__v8hi)_mm_setzero_si128());
+}
-#define _mm256_mask_srli_epi16(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
- (__v16hi)(__m256i)(W), \
- (__mmask16)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srli_epi16(__m256i __W, __mmask16 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srli_epi16(__A, __B),
+ (__v16hi)__W);
+}
-#define _mm256_maskz_srli_epi16(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrlwi256_mask((__v16hi)(__m256i)(A), (int)(imm), \
- (__v16hi)_mm256_setzero_si256(), \
- (__mmask16)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srli_epi16(__mmask16 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
+ (__v16hi)_mm256_srli_epi16(__A, __B),
+ (__v16hi)_mm256_setzero_si256());
+}
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_mov_epi16 (__m128i __W, __mmask8 __U, __m128i __A)
@@ -3342,28 +3112,24 @@ _mm256_mask_permutexvar_epi16 (__m256i __W, __mmask16 __M, __m256i __A,
}
#define _mm_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
- (__m128i)__builtin_ia32_palignr128_mask((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(N), \
- (__v16qi)(__m128i)(W), \
- (__mmask16)(U)); })
+ (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+ (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
+ (__v16qi)(__m128i)(W)); })
#define _mm_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
- (__m128i)__builtin_ia32_palignr128_mask((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(N), \
- (__v16qi)_mm_setzero_si128(), \
- (__mmask16)(U)); })
+ (__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
+ (__v16qi)_mm_alignr_epi8((A), (B), (int)(N)), \
+ (__v16qi)_mm_setzero_si128()); })
#define _mm256_mask_alignr_epi8(W, U, A, B, N) __extension__ ({ \
- (__m256i)__builtin_ia32_palignr256_mask((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), (int)(N), \
- (__v32qi)(__m256i)(W), \
- (__mmask32)(U)); })
+ (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+ (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
+ (__v32qi)(__m256i)(W)); })
#define _mm256_maskz_alignr_epi8(U, A, B, N) __extension__ ({ \
- (__m256i)__builtin_ia32_palignr256_mask((__v32qi)(__m256i)(A), \
- (__v32qi)(__m256i)(B), (int)(N), \
- (__v32qi)_mm256_setzero_si256(), \
- (__mmask32)(U)); })
+ (__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
+ (__v32qi)_mm256_alignr_epi8((A), (B), (int)(N)), \
+ (__v32qi)_mm256_setzero_si256()); })
#define _mm_dbsad_epu8(A, B, imm) __extension__ ({ \
(__m128i)__builtin_ia32_dbpsadbw128_mask((__v16qi)(__m128i)(A), \
diff --git a/lib/Headers/avx512vldqintrin.h b/lib/Headers/avx512vldqintrin.h
index 8187bcd6b28e..cd9da4370564 100644
--- a/lib/Headers/avx512vldqintrin.h
+++ b/lib/Headers/avx512vldqintrin.h
@@ -37,20 +37,17 @@ _mm256_mullo_epi64 (__m256i __A, __m256i __B) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mullo_epi64 (__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+_mm256_mask_mullo_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_mullo_epi64(__A, __B),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mullo_epi64 (__mmask8 __U, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmullq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+_mm256_maskz_mullo_epi64(__mmask8 __U, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_mullo_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -59,293 +56,241 @@ _mm_mullo_epi64 (__m128i __A, __m128i __B) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mullo_epi64 (__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+_mm_mask_mullo_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_mullo_epi64(__A, __B),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mullo_epi64 (__mmask8 __U, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmullq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+_mm_maskz_mullo_epi64(__mmask8 __U, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_mullo_epi64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_andnot_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_andnot_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_andnot_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_andnot_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_andnpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_andnot_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_andnot_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_andnot_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_andnot_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_andnot_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_andnot_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_andnpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_andnot_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_andnot_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_andnot_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_andnot_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_andnot_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_andnot_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_andnps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_andnot_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_andnot_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_andnot_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_andnot_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_andnot_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_andnot_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_andnps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_andnot_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_andnot_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_and_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_and_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_and_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_and_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_andpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_and_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_and_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_and_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_and_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_and_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_and_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_andpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_and_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_and_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_and_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_and_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_and_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_and_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_andps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_and_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_and_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_and_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_and_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_and_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_and_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_andps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_and_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_and_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_xor_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B) {
- return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_xor_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_xor_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_xor_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_xorpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_xor_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_xor_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_xor_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_xor_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_xor_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_xor_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_xorpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_xor_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_xor_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_xor_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_xor_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_xor_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_xorps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_xor_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_xor_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_xor_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_xor_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_xor_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_xor_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_xorps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_xor_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_xor_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_or_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_or_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_or_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_or_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_orpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_or_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_or_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_or_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_or_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_or_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_or_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_orpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_or_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_or_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_or_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_or_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_or_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_or_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_orps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_or_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_or_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_or_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_or_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_or_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_or_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_orps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_or_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_or_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -1151,82 +1096,72 @@ _mm256_maskz_broadcast_i64x2 (__mmask8 __M, __m128i __A)
}
#define _mm256_extractf64x2_pd(A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)-1); })
+ (__m128d)__builtin_shufflevector((__v4df)(__m256d)(A), \
+ (__v4df)_mm256_undefined_pd(), \
+ ((imm) & 1) ? 2 : 0, \
+ ((imm) & 1) ? 3 : 1); })
#define _mm256_mask_extractf64x2_pd(W, U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)(__m128d)(W), \
- (__mmask8)(U)); })
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm256_extractf64x2_pd((A), (imm)), \
+ (__v2df)(W)); })
#define _mm256_maskz_extractf64x2_pd(U, A, imm) __extension__ ({ \
- (__m128d)__builtin_ia32_extractf64x2_256_mask((__v4df)(__m256d)(A), \
- (int)(imm), \
- (__v2df)_mm_setzero_pd(), \
- (__mmask8)(U)); })
+ (__m128d)__builtin_ia32_selectpd_128((__mmask8)(U), \
+ (__v2df)_mm256_extractf64x2_pd((A), (imm)), \
+ (__v2df)_mm_setzero_pd()); })
#define _mm256_extracti64x2_epi64(A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)-1); })
+ (__m128i)__builtin_shufflevector((__v4di)(__m256i)(A), \
+ (__v4di)_mm256_undefined_si256(), \
+ ((imm) & 1) ? 2 : 0, \
+ ((imm) & 1) ? 3 : 1); })
#define _mm256_mask_extracti64x2_epi64(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm256_extracti64x2_epi64((A), (imm)), \
+ (__v2di)(W)); })
#define _mm256_maskz_extracti64x2_epi64(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti64x2_256_mask((__v4di)(__m256i)(A), \
- (int)(imm), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm256_extracti64x2_epi64((A), (imm)), \
+ (__v2di)_mm_setzero_di()); })
#define _mm256_insertf64x2(A, B, imm) __extension__ ({ \
- (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(imm), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)-1); })
+ (__m256d)__builtin_shufflevector((__v4df)(A), \
+ (__v4df)_mm256_castpd128_pd256((__m128d)(B)), \
+ ((imm) & 0x1) ? 0 : 4, \
+ ((imm) & 0x1) ? 1 : 5, \
+ ((imm) & 0x1) ? 4 : 2, \
+ ((imm) & 0x1) ? 5 : 3); })
#define _mm256_mask_insertf64x2(W, U, A, B, imm) __extension__ ({ \
- (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(imm), \
- (__v4df)(__m256d)(W), \
- (__mmask8)(U)); })
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_insertf64x2((A), (B), (imm)), \
+ (__v4df)(W)); })
#define _mm256_maskz_insertf64x2(U, A, B, imm) __extension__ ({ \
- (__m256d)__builtin_ia32_insertf64x2_256_mask((__v4df)(__m256d)(A), \
- (__v2df)(__m128d)(B), \
- (int)(imm), \
- (__v4df)_mm256_setzero_pd(), \
- (__mmask8)(U)); })
+ (__m256d)__builtin_ia32_selectpd_256((__mmask8)(U), \
+ (__v4df)_mm256_insertf64x2((A), (B), (imm)), \
+ (__v4df)_mm256_setzero_pd()); })
#define _mm256_inserti64x2(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
- (__v2di)(__m128i)(B), \
- (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)-1); })
+ (__m256i)__builtin_shufflevector((__v4di)(A), \
+ (__v4di)_mm256_castsi128_si256((__m128i)(B)), \
+ ((imm) & 0x1) ? 0 : 4, \
+ ((imm) & 0x1) ? 1 : 5, \
+ ((imm) & 0x1) ? 4 : 2, \
+ ((imm) & 0x1) ? 5 : 3); })
#define _mm256_mask_inserti64x2(W, U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
- (__v2di)(__m128i)(B), \
- (int)(imm), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
+ (__v4di)(W)); })
#define _mm256_maskz_inserti64x2(U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_inserti64x2_256_mask((__v4di)(__m256i)(A), \
- (__v2di)(__m128i)(B), \
- (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_inserti64x2((A), (B), (imm)), \
+ (__v4di)_mm256_setzero_si256()); })
#define _mm_mask_fpclass_pd_mask(U, A, imm) __extension__ ({ \
(__mmask8)__builtin_ia32_fpclasspd128_mask((__v2df)(__m128d)(A), (int)(imm), \
diff --git a/lib/Headers/avx512vlintrin.h b/lib/Headers/avx512vlintrin.h
index 295ce291f7ce..f3744da6ab8a 100644
--- a/lib/Headers/avx512vlintrin.h
+++ b/lib/Headers/avx512vlintrin.h
@@ -616,277 +616,227 @@ _mm256_mask_cmpneq_epu64_mask(__mmask8 __u, __m256i __a, __m256i __b) {
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_add_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_add_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_add_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_add_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_add_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_add_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_add_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_add_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_add_epi64(__A, __B),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_add_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_add_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_paddq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_add_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sub_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_sub_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sub_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_epi32 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_sub_epi32(__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sub_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sub_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
- __m256i __B)
+_mm256_mask_sub_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sub_epi64(__A, __B),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
+_mm256_maskz_sub_epi64(__mmask8 __U, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_psubq256_mask ((__v4di) __A,
- (__v4di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sub_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_add_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_add_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_add_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_add_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_add_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_add_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_add_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_add_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_add_epi64(__A, __B),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_add_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_add_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_paddq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_add_epi64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sub_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sub_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sub_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sub_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sub_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sub_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sub_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sub_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sub_epi64(__A, __B),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sub_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sub_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psubq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sub_epi64(__A, __B),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mul_epi32 (__m256i __W, __mmask8 __M, __m256i __X,
- __m256i __Y)
+_mm256_mask_mul_epi32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v4di) __W, __M);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_mul_epi32(__X, __Y),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mul_epi32 (__mmask8 __M, __m256i __X, __m256i __Y)
+_mm256_maskz_mul_epi32(__mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_pmuldq256_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v4di)
- _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_mul_epi32(__X, __Y),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mul_epi32 (__m128i __W, __mmask8 __M, __m128i __X,
- __m128i __Y)
+_mm_mask_mul_epi32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v2di) __W, __M);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_mul_epi32(__X, __Y),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mul_epi32 (__mmask8 __M, __m128i __X, __m128i __Y)
+_mm_maskz_mul_epi32(__mmask8 __M, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_pmuldq128_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v2di)
- _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_mul_epi32(__X, __Y),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mul_epu32 (__m256i __W, __mmask8 __M, __m256i __X,
- __m256i __Y)
+_mm256_mask_mul_epu32(__m256i __W, __mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v4di) __W, __M);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_mul_epu32(__X, __Y),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mul_epu32 (__mmask8 __M, __m256i __X, __m256i __Y)
+_mm256_maskz_mul_epu32(__mmask8 __M, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_pmuludq256_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v4di)
- _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__M,
+ (__v4di)_mm256_mul_epu32(__X, __Y),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mul_epu32 (__m128i __W, __mmask8 __M, __m128i __X,
- __m128i __Y)
+_mm_mask_mul_epu32(__m128i __W, __mmask8 __M, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v2di) __W, __M);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_mul_epu32(__X, __Y),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mul_epu32 (__mmask8 __M, __m128i __X, __m128i __Y)
+_mm_maskz_mul_epu32(__mmask8 __M, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_pmuludq128_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v2di)
- _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__M,
+ (__v2di)_mm_mul_epu32(__X, __Y),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_mullo_epi32 (__mmask8 __M, __m256i __A, __m256i __B)
+_mm256_maskz_mullo_epi32(__mmask8 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- __M);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_mullo_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_mullo_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B)
+_mm256_mask_mullo_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_pmulld256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W, __M);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_mullo_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_mullo_epi32 (__mmask8 __M, __m128i __A, __m128i __B)
+_mm_maskz_mullo_epi32(__mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- __M);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_mullo_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_mullo_epi32 (__m128i __W, __mmask16 __M, __m128i __A,
- __m128i __B)
+_mm_mask_mullo_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pmulld128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W, __M);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_mullo_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
@@ -1895,71 +1845,59 @@ _mm256_mask3_fnmsub_ps(__m256 __A, __m256 __B, __m256 __C, __mmask8 __U)
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_add_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_add_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_add_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_add_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_addpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_add_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_add_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_add_pd (__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_add_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_add_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_add_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_addpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_add_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_add_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_add_ps (__m128 __W, __mmask16 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_add_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_add_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_add_ps (__mmask16 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_addps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_add_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_add_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_add_ps (__m256 __W, __mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_add_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_add_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_add_ps (__mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_addps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_add_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_add_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -2196,32 +2134,30 @@ _mm256_mask_compressstoreu_epi32 (void *__P, __mmask8 __U, __m256i __A) {
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_cvtepi32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
- (__v2df) __W,
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepi32_pd(__A),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtdq2pd128_mask ((__v4si) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepi32_pd(__A),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_mask_cvtepi32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
- return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
- (__v4df) __W,
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepi32_pd(__A),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepi32_pd (__mmask8 __U, __m128i __A) {
- return (__m256d) __builtin_ia32_cvtdq2pd256_mask ((__v4si) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepi32_pd(__A),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -2620,48 +2556,41 @@ _mm256_maskz_cvttps_epu32 (__mmask8 __U, __m256 __A) {
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cvtepu32_pd (__m128i __A) {
- return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) -1);
+ return (__m128d) __builtin_convertvector(
+ __builtin_shufflevector((__v4su)__A, (__v4su)__A, 0, 1), __v2df);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mask_cvtepu32_pd (__m128d __W, __mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
- (__v2df) __W,
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepu32_pd(__A),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
- return (__m128d) __builtin_ia32_cvtudq2pd128_mask ((__v4si) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_cvtepu32_pd(__A),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_cvtepu32_pd (__m128i __A) {
- return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) -1);
+ return (__m256d)__builtin_convertvector((__v4su)__A, __v4df);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_mask_cvtepu32_pd (__m256d __W, __mmask8 __U, __m128i __A) {
- return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
- (__v4df) __W,
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepu32_pd(__A),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepu32_pd (__mmask8 __U, __m128i __A) {
- return (__m256d) __builtin_ia32_cvtudq2pd256_mask ((__v4si) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_cvtepu32_pd(__A),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -2711,72 +2640,59 @@ _mm256_maskz_cvtepu32_ps (__mmask8 __U, __m256i __A) {
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_div_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_div_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_div_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_div_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_divpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_div_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_div_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_div_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B) {
- return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_div_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_div_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_div_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_divpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_div_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_div_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_div_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_div_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_div_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_div_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_divps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_div_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_div_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_div_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_div_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_div_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_div_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_divps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_div_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_div_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
@@ -3127,240 +3043,199 @@ _mm256_maskz_getexp_ps (__mmask8 __U, __m256 __A) {
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_max_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_max_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_max_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_max_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_maxpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_max_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_max_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_max_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B) {
- return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_max_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_max_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_max_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_maxpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_max_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_max_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_max_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_max_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_max_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_max_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_maxps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_max_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_max_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_max_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_max_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_max_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_max_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_maxps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_max_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_max_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_min_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_min_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_min_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_min_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_minpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_min_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_min_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_min_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B) {
- return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_min_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_min_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_min_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_minpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_min_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_min_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_min_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_min_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_min_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_min_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_minps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_min_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_min_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_min_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_min_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_min_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_min_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_minps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_min_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_min_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_mul_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_mul_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_mul_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_mul_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_mulpd_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_mul_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_mul_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_mul_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B) {
- return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_mul_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_mul_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_mul_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_mulpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_mul_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_mul_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_mul_ps (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_mul_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_mul_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_mul_ps (__mmask8 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_mulps_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_mul_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_mul_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_mul_ps (__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_mul_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_mul_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_mul_ps (__mmask8 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_mulps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_mul_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_mul_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
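
Every conversion in this hunk follows the same shape: compute the full-width unmasked operation, then blend it against the pass-through source (mask form) or zero (maskz form) via a __builtin_ia32_select* builtin. A minimal sketch of the per-lane semantics, checked against a scalar reference (ref_mask_mul_ps and the test driver are illustrative only, not part of the header; build with -mavx512f -mavx512vl and run on AVX-512VL hardware):

#include <immintrin.h>
#include <stdio.h>

/* Scalar reference: lane i of the masked op is a[i]*b[i] when bit i of
   the mask is set, otherwise the pass-through value w[i] (the maskz form
   would use 0.0f instead of w[i]). */
static void ref_mask_mul_ps(float *dst, const float *w, unsigned char u,
                            const float *a, const float *b) {
    for (int i = 0; i < 4; ++i)
        dst[i] = ((u >> i) & 1) ? a[i] * b[i] : w[i];
}

int main(void) {
    float a[4] = {1, 2, 3, 4}, b[4] = {10, 20, 30, 40};
    float w[4] = {-1, -1, -1, -1}, ref[4], got[4];
    ref_mask_mul_ps(ref, w, 0x5, a, b);            /* lanes 0 and 2 multiplied */
    __m128 v = _mm_mask_mul_ps(_mm_loadu_ps(w), 0x5,
                               _mm_loadu_ps(a), _mm_loadu_ps(b));
    _mm_storeu_ps(got, v);
    for (int i = 0; i < 4; ++i)
        printf("%f %f\n", ref[i], got[i]);         /* the two columns match */
    return 0;
}
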
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_abs_epi32 (__m128i __W, __mmask8 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
- (__v4si) __W,
- (__mmask8) __U);
+_mm_mask_abs_epi32(__m128i __W, __mmask8 __U, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_abs_epi32(__A),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_abs_epi32 (__mmask8 __U, __m128i __A) {
- return (__m128i) __builtin_ia32_pabsd128_mask ((__v4si) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+_mm_maskz_abs_epi32(__mmask8 __U, __m128i __A) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_abs_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_abs_epi32 (__m256i __W, __mmask8 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
- (__v8si) __W,
- (__mmask8) __U);
+_mm256_mask_abs_epi32(__m256i __W, __mmask8 __U, __m256i __A) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_abs_epi32(__A),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_abs_epi32 (__mmask8 __U, __m256i __A) {
- return (__m256i) __builtin_ia32_pabsd256_mask ((__v8si) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+_mm256_maskz_abs_epi32(__mmask8 __U, __m256i __A) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_abs_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -3410,37 +3285,31 @@ _mm256_maskz_abs_epi64 (__mmask8 __U, __m256i __A) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- __M);
+_mm_maskz_max_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_max_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxsd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W, __M);
+_mm_mask_max_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_max_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_max_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- __M);
+_mm256_maskz_max_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_max_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxsd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W, __M);
+_mm256_mask_max_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_max_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -3496,37 +3365,31 @@ _mm256_max_epi64 (__m256i __A, __m256i __B) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_max_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- __M);
+_mm_maskz_max_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_max_epu32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_max_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pmaxud128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W, __M);
+_mm_mask_max_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_max_epu32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_max_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- __M);
+_mm256_maskz_max_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_max_epu32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_max_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pmaxud256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W, __M);
+_mm256_mask_max_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_max_epu32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -3582,37 +3445,31 @@ _mm256_mask_max_epu64 (__m256i __W, __mmask8 __M, __m256i __A,
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_min_epi32 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- __M);
+_mm_maskz_min_epi32(__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_min_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epi32 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pminsd128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W, __M);
+_mm_mask_min_epi32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_min_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_min_epi32 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- __M);
+_mm256_maskz_min_epi32(__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_min_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epi32 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pminsd256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W, __M);
+_mm256_mask_min_epi32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_min_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -3668,37 +3525,31 @@ _mm256_maskz_min_epi64 (__mmask8 __M, __m256i __A, __m256i __B) {
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_min_epu32 (__mmask8 __M, __m128i __A, __m128i __B) {
- return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- __M);
+_mm_maskz_min_epu32(__mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_min_epu32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_min_epu32 (__m128i __W, __mmask8 __M, __m128i __A,
- __m128i __B) {
- return (__m128i) __builtin_ia32_pminud128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W, __M);
+_mm_mask_min_epu32(__m128i __W, __mmask8 __M, __m128i __A, __m128i __B) {
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__M,
+ (__v4si)_mm_min_epu32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_min_epu32 (__mmask8 __M, __m256i __A, __m256i __B) {
- return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- __M);
+_mm256_maskz_min_epu32(__mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_min_epu32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_min_epu32 (__m256i __W, __mmask8 __M, __m256i __A,
- __m256i __B) {
- return (__m256i) __builtin_ia32_pminud256_mask ((__v8si) __A,
- (__v8si) __B,
- (__v8si) __W, __M);
+_mm256_mask_min_epu32(__m256i __W, __mmask8 __M, __m256i __A, __m256i __B) {
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__M,
+ (__v8si)_mm256_min_epu32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -4095,132 +3946,115 @@ _mm256_maskz_scalef_ps (__mmask8 __U, __m256 __A, __m256 __B) {
(__v8si)(__m256i)(v1), (int)(scale)); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_sqrt_pd (__m128d __W, __mmask8 __U, __m128d __A) {
- return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_sqrt_pd(__m128d __W, __mmask8 __U, __m128d __A) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sqrt_pd(__A),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_sqrt_pd (__mmask8 __U, __m128d __A) {
- return (__m128d) __builtin_ia32_sqrtpd128_mask ((__v2df) __A,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_sqrt_pd(__mmask8 __U, __m128d __A) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sqrt_pd(__A),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_sqrt_pd (__m256d __W, __mmask8 __U, __m256d __A) {
- return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_sqrt_pd(__m256d __W, __mmask8 __U, __m256d __A) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sqrt_pd(__A),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_sqrt_pd (__mmask8 __U, __m256d __A) {
- return (__m256d) __builtin_ia32_sqrtpd256_mask ((__v4df) __A,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_sqrt_pd(__mmask8 __U, __m256d __A) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sqrt_pd(__A),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_sqrt_ps (__m128 __W, __mmask8 __U, __m128 __A) {
- return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_sqrt_ps(__m128 __W, __mmask8 __U, __m128 __A) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sqrt_ps(__A),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_sqrt_ps (__mmask8 __U, __m128 __A) {
- return (__m128) __builtin_ia32_sqrtps128_mask ((__v4sf) __A,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_sqrt_ps(__mmask8 __U, __m128 __A) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sqrt_ps(__A),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_sqrt_ps (__m256 __W, __mmask8 __U, __m256 __A) {
- return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_sqrt_ps(__m256 __W, __mmask8 __U, __m256 __A) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sqrt_ps(__A),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_sqrt_ps (__mmask8 __U, __m256 __A) {
- return (__m256) __builtin_ia32_sqrtps256_mask ((__v8sf) __A,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_sqrt_ps(__mmask8 __U, __m256 __A) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sqrt_ps(__A),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_sub_pd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df) __W,
- (__mmask8) __U);
+_mm_mask_sub_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sub_pd(__A, __B),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_sub_pd (__mmask8 __U, __m128d __A, __m128d __B) {
- return (__m128d) __builtin_ia32_subpd128_mask ((__v2df) __A,
- (__v2df) __B,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+_mm_maskz_sub_pd(__mmask8 __U, __m128d __A, __m128d __B) {
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_sub_pd(__A, __B),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_sub_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256d __B) {
- return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df) __W,
- (__mmask8) __U);
+_mm256_mask_sub_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sub_pd(__A, __B),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_pd (__mmask8 __U, __m256d __A, __m256d __B) {
- return (__m256d) __builtin_ia32_subpd256_mask ((__v4df) __A,
- (__v4df) __B,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8) __U);
+_mm256_maskz_sub_pd(__mmask8 __U, __m256d __A, __m256d __B) {
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_sub_pd(__A, __B),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_sub_ps (__m128 __W, __mmask16 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf) __W,
- (__mmask8) __U);
+_mm_mask_sub_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sub_ps(__A, __B),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_sub_ps (__mmask16 __U, __m128 __A, __m128 __B) {
- return (__m128) __builtin_ia32_subps128_mask ((__v4sf) __A,
- (__v4sf) __B,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+_mm_maskz_sub_ps(__mmask8 __U, __m128 __A, __m128 __B) {
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_sub_ps(__A, __B),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_sub_ps (__m256 __W, __mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf) __W,
- (__mmask8) __U);
+_mm256_mask_sub_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sub_ps(__A, __B),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_sub_ps (__mmask16 __U, __m256 __A, __m256 __B) {
- return (__m256) __builtin_ia32_subps256_mask ((__v8sf) __A,
- (__v8sf) __B,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+_mm256_maskz_sub_ps(__mmask8 __U, __m256 __A, __m256 __B) {
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_sub_ps(__A, __B),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -4551,344 +4385,324 @@ _mm256_maskz_permutex2var_epi64 (__mmask8 __U, __m256i __A,
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepi8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi8_epi32(__A),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepi8_epi32(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxbd128_mask ((__v16qi) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi8_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_cvtepi8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi8_epi32(__A),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepi8_epi32 (__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxbd256_mask ((__v16qi) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi8_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepi8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi8_epi64(__A),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxbq128_mask ((__v16qi) __A,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi8_epi64(__A),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtepi8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi8_epi64(__A),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi8_epi64 (__mmask8 __U, __m128i __A)
+_mm256_maskz_cvtepi8_epi64(__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxbq256_mask ((__v16qi) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi8_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+_mm_mask_cvtepi32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
{
- return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi32_epi64(__X),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+_mm_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
{
- return (__m128i) __builtin_ia32_pmovsxdq128_mask ((__v4si) __X,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi32_epi64(__X),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+_mm256_mask_cvtepi32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
{
- return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi32_epi64(__X),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi32_epi64 (__mmask8 __U, __m128i __X)
+_mm256_maskz_cvtepi32_epi64(__mmask8 __U, __m128i __X)
{
- return (__m256i) __builtin_ia32_pmovsxdq256_mask ((__v4si) __X,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi32_epi64(__X),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepi16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi16_epi32(__A),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepi16_epi32(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxwd128_mask ((__v8hi) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepi16_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtepi16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi16_epi32(__A),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepi16_epi32 (__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxwd256_mask ((__v8hi) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepi16_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepi16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepi16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi16_epi64(__A),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovsxwq128_mask ((__v8hi) __A,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepi16_epi64(__A),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepi16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtepi16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi16_epi64(__A),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepi16_epi64 (__mmask8 __U, __m128i __A)
+_mm256_maskz_cvtepi16_epi64(__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovsxwq256_mask ((__v8hi) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepi16_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu8_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepu8_epi32(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu8_epi32(__A),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxbd128_mask ((__v16qi) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu8_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu8_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtepu8_epi32(__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu8_epi32(__A),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu8_epi32 (__mmask8 __U, __m128i __A)
+_mm256_maskz_cvtepu8_epi32(__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxbd256_mask ((__v16qi) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu8_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu8_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepu8_epi64(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu8_epi64(__A),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepu8_epi64(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxbq128_mask ((__v16qi) __A,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu8_epi64(__A),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu8_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtepu8_epi64(__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu8_epi64(__A),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_cvtepu8_epi64 (__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxbq256_mask ((__v16qi) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu8_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu32_epi64 (__m128i __W, __mmask8 __U, __m128i __X)
+_mm_mask_cvtepu32_epi64(__m128i __W, __mmask8 __U, __m128i __X)
{
- return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu32_epi64(__X),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+_mm_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
{
- return (__m128i) __builtin_ia32_pmovzxdq128_mask ((__v4si) __X,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu32_epi64(__X),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu32_epi64 (__m256i __W, __mmask8 __U, __m128i __X)
+_mm256_mask_cvtepu32_epi64(__m256i __W, __mmask8 __U, __m128i __X)
{
- return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu32_epi64(__X),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu32_epi64 (__mmask8 __U, __m128i __X)
+_mm256_maskz_cvtepu32_epi64(__mmask8 __U, __m128i __X)
{
- return (__m256i) __builtin_ia32_pmovzxdq256_mask ((__v4si) __X,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu32_epi64(__X),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu16_epi32 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepu16_epi32(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu16_epi32(__A),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxwd128_mask ((__v8hi) __A,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_cvtepu16_epi32(__A),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu16_epi32 (__m256i __W, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtepu16_epi32(__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu16_epi32(__A),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu16_epi32 (__mmask8 __U, __m128i __A)
+_mm256_maskz_cvtepu16_epi32(__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxwd256_mask ((__v8hi) __A,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_cvtepu16_epi32(__A),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_cvtepu16_epi64 (__m128i __W, __mmask8 __U, __m128i __A)
+_mm_mask_cvtepu16_epi64(__m128i __W, __mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu16_epi64(__A),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+_mm_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
{
- return (__m128i) __builtin_ia32_pmovzxwq128_mask ((__v8hi) __A,
- (__v2di)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_cvtepu16_epi64(__A),
+ (__v2di)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_cvtepu16_epi64 (__m256i __W, __mmask8 __U, __m128i __A)
+_mm256_mask_cvtepu16_epi64(__m256i __W, __mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu16_epi64(__A),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_cvtepu16_epi64 (__mmask8 __U, __m128i __A)
+_mm256_maskz_cvtepu16_epi64(__mmask8 __U, __m128i __A)
{
- return (__m256i) __builtin_ia32_pmovzxwq256_mask ((__v8hi) __A,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_cvtepu16_epi64(__A),
+ (__v4di)_mm256_setzero_si256());
}
@@ -5125,125 +4939,132 @@ _mm256_maskz_rolv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
(__mmask8)(U)); })
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sll_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sll_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sll_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sll_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sll_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_pslld128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sll_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sll_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_sll_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
- (__v4si) __B,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sll_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sll_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+_mm256_maskz_sll_epi32(__mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_pslld256_mask ((__v8si) __A,
- (__v4si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sll_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-#define _mm_mask_slli_epi32(W, U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_pslldi128_mask((__v4si)(__m128i)(A), (int)(B), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_slli_epi32(__A, __B),
+ (__v4si)__W);
+}
-#define _mm_maskz_slli_epi32(U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_pslldi128_mask((__v4si)(__m128i)(A), (int)(B), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_slli_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
+}
-#define _mm256_mask_slli_epi32(W, U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_pslldi256_mask((__v8si)(__m256i)(A), (int)(B), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_slli_epi32(__A, __B),
+ (__v8si)__W);
+}
-#define _mm256_maskz_slli_epi32(U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_pslldi256_mask((__v8si)(__m256i)(A), (int)(B), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_slli_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
+}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sll_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sll_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sll_epi64(__A, __B),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sll_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sll_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psllq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sll_epi64(__A, __B),
+ (__v2di)_mm_setzero_di());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sll_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_sll_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
- (__v2di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sll_epi64(__A, __B),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sll_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+_mm256_maskz_sll_epi64(__mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psllq256_mask ((__v4di) __A,
- (__v2di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sll_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
-#define _mm_mask_slli_epi64(W, U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_psllqi128_mask((__v2di)(__m128i)(A), (int)(B), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U)); })
-
-#define _mm_maskz_slli_epi64(U, A, B) __extension__ ({ \
- (__m128i)__builtin_ia32_psllqi128_mask((__v2di)(__m128i)(A), (int)(B), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_slli_epi64(__A, __B),
+ (__v2di)__W);
+}
-#define _mm256_mask_slli_epi64(W, U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_psllqi256_mask((__v4di)(__m256i)(A), (int)(B), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_slli_epi64(__A, __B),
+ (__v2di)_mm_setzero_di());
+}
-#define _mm256_maskz_slli_epi64(U, A, B) __extension__ ({ \
- (__m256i)__builtin_ia32_psllqi256_mask((__v4di)(__m256i)(A), (int)(B), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_slli_epi64(__A, __B),
+ (__v4di)__W);
+}
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_slli_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
+}
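
The slli/srli families above change from macros to real inline functions, so the shift count becomes an ordinary int parameter rather than a token pasted into a macro body. A minimal usage sketch (shift_all is a hypothetical helper, assuming AVX-512VL is enabled):

#include <immintrin.h>

/* Zero the lanes whose mask bit is clear and shift the rest left by a
   count that may now be a runtime variable; when the count is a
   compile-time constant the compiler can still pick the immediate form. */
static __m128i shift_all(__m128i v, __mmask8 u, int count) {
    return _mm_maskz_slli_epi32(u, v, count);
}
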
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_rorv_epi32 (__m128i __A, __m128i __B)
@@ -5366,387 +5187,335 @@ _mm256_maskz_rorv_epi64 (__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sllv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
- __m128i __Y)
+_mm_mask_sllv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sllv_epi64(__X, __Y),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sllv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+_mm_maskz_sllv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psllv2di_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sllv_epi64(__X, __Y),
+ (__v2di)_mm_setzero_di());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sllv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
- __m256i __Y)
+_mm256_mask_sllv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sllv_epi64(__X, __Y),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sllv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+_mm256_maskz_sllv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psllv4di_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sllv_epi64(__X, __Y),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sllv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
- __m128i __Y)
+_mm_mask_sllv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sllv_epi32(__X, __Y),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sllv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+_mm_maskz_sllv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psllv4si_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sllv_epi32(__X, __Y),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sllv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
- __m256i __Y)
+_mm256_mask_sllv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sllv_epi32(__X, __Y),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sllv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+_mm256_maskz_sllv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psllv8si_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sllv_epi32(__X, __Y),
+ (__v8si)_mm256_setzero_si256());
}
-
-
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srlv_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
- __m128i __Y)
+_mm_mask_srlv_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srlv_epi64(__X, __Y),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srlv_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+_mm_maskz_srlv_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psrlv2di_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srlv_epi64(__X, __Y),
+ (__v2di)_mm_setzero_di());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srlv_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
- __m256i __Y)
+_mm256_mask_srlv_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srlv_epi64(__X, __Y),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srlv_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
+_mm256_maskz_srlv_epi64(__mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psrlv4di_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srlv_epi64(__X, __Y),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srlv_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
- __m128i __Y)
+_mm_mask_srlv_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srlv_epi32(__X, __Y),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srlv_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+_mm_maskz_srlv_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psrlv4si_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srlv_epi32(__X, __Y),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srlv_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
- __m256i __Y)
+_mm256_mask_srlv_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srlv_epi32(__X, __Y),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srlv_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+_mm256_maskz_srlv_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psrlv8si_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srlv_epi32(__X, __Y),
+ (__v8si)_mm256_setzero_si256());
}
-
-
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srl_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_srl_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srl_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srl_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_srl_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrld128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srl_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srl_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_srl_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
- (__v4si) __B,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srl_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srl_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+_mm256_maskz_srl_epi32(__mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrld256_mask ((__v8si) __A,
- (__v4si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srl_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-#define _mm_mask_srli_epi32(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrldi128_mask((__v4si)(__m128i)(A), (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srli_epi32(__A, __B),
+ (__v4si)__W);
+}
-#define _mm_maskz_srli_epi32(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrldi128_mask((__v4si)(__m128i)(A), (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srli_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
+}
-#define _mm256_mask_srli_epi32(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrldi256_mask((__v8si)(__m256i)(A), (int)(imm), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srli_epi32(__A, __B),
+ (__v8si)__W);
+}
-#define _mm256_maskz_srli_epi32(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrldi256_mask((__v8si)(__m256i)(A), (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srli_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
+}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srl_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_srl_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srl_epi64(__A, __B),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srl_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_srl_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrlq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srl_epi64(__A, __B),
+ (__v2di)_mm_setzero_di());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srl_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_srl_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
- (__v2di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srl_epi64(__A, __B),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srl_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+_mm256_maskz_srl_epi64(__mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrlq256_mask ((__v4di) __A,
- (__v2di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srl_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
-#define _mm_mask_srli_epi64(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srli_epi64(__A, __B),
+ (__v2di)__W);
+}
-#define _mm_maskz_srli_epi64(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psrlqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
- (__v2di)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srli_epi64(__A, __B),
+ (__v2di)_mm_setzero_di());
+}
-#define _mm256_mask_srli_epi64(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srli_epi64(__A, __B),
+ (__v4di)__W);
+}
-#define _mm256_maskz_srli_epi64(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psrlqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srli_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
+}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srav_epi32 (__m128i __W, __mmask8 __U, __m128i __X,
- __m128i __Y)
+_mm_mask_srav_epi32(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srav_epi32(__X, __Y),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srav_epi32 (__mmask8 __U, __m128i __X, __m128i __Y)
+_mm_maskz_srav_epi32(__mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psrav4si_mask ((__v4si) __X,
- (__v4si) __Y,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srav_epi32(__X, __Y),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srav_epi32 (__m256i __W, __mmask8 __U, __m256i __X,
- __m256i __Y)
+_mm256_mask_srav_epi32(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srav_epi32(__X, __Y),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_srav_epi32 (__mmask8 __U, __m256i __X, __m256i __Y)
+_mm256_maskz_srav_epi32(__mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psrav8si_mask ((__v8si) __X,
- (__v8si) __Y,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srav_epi32(__X, __Y),
+ (__v8si)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srav_epi64 (__m128i __X, __m128i __Y)
+_mm_srav_epi64(__m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_psravq128((__v2di)__X, (__v2di)__Y);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_srav_epi64 (__m128i __W, __mmask8 __U, __m128i __X,
- __m128i __Y)
+_mm_mask_srav_epi64(__m128i __W, __mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srav_epi64(__X, __Y),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_srav_epi64 (__mmask8 __U, __m128i __X, __m128i __Y)
+_mm_maskz_srav_epi64(__mmask8 __U, __m128i __X, __m128i __Y)
{
- return (__m128i) __builtin_ia32_psravq128_mask ((__v2di) __X,
- (__v2di) __Y,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srav_epi64(__X, __Y),
+ (__v2di)_mm_setzero_di());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_srav_epi64 (__m256i __X, __m256i __Y)
+_mm256_srav_epi64(__m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_psravq256((__v4di)__X, (__v4di)__Y);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_srav_epi64 (__m256i __W, __mmask8 __U, __m256i __X,
- __m256i __Y)
+_mm256_mask_srav_epi64(__m256i __W, __mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srav_epi64(__X, __Y),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_srav_epi64 (__mmask8 __U, __m256i __X, __m256i __Y)
{
- return (__m256i) __builtin_ia32_psravq256_mask ((__v4di) __X,
- (__v4di) __Y,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srav_epi64(__X, __Y),
+ (__v4di)_mm256_setzero_si256());
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -5975,6 +5744,7 @@ _mm256_maskz_movedup_pd (__mmask8 __U, __m256d __A)
(__v8si)_mm256_setzero_si256(), \
(__mmask8)(M)); })
+#ifdef __x86_64__
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mask_set1_epi64 (__m128i __O, __mmask8 __M, long long __A)
{
@@ -6006,6 +5776,7 @@ _mm256_maskz_set1_epi64 (__mmask8 __M, long long __A)
_mm256_setzero_si256 (),
__M);
}
+#endif
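The __x86_64__ guard added above keeps these long long broadcast forms out of 32-bit builds. A hedged usage sketch (helper name invented; assumes an x86-64 target with AVX-512VL):

#include <immintrin.h>

#ifdef __x86_64__
/* Merge-masking broadcast: lanes whose mask bit is set receive the scalar
   v; the remaining lanes keep their values from old. */
static __m128i broadcast_where(__m128i old, __mmask8 m, long long v)
{
  return _mm_mask_set1_epi64(old, m, v);
}
#endif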
#define _mm_fixupimm_pd(A, B, C, imm) __extension__ ({ \
(__m128d)__builtin_ia32_fixupimmpd128_mask((__v2df)(__m128d)(A), \
@@ -6653,85 +6424,67 @@ _mm256_maskz_rcp14_ps (__mmask8 __U, __m256 __A)
(__v8sf)_mm256_setzero_ps()); })
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mask_permutevar_pd (__m128d __W, __mmask8 __U, __m128d __A,
- __m128i __C)
+_mm_mask_permutevar_pd(__m128d __W, __mmask8 __U, __m128d __A, __m128i __C)
{
- return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
- (__v2di) __C,
- (__v2df) __W,
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_permutevar_pd(__A, __C),
+ (__v2df)__W);
}
static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_maskz_permutevar_pd (__mmask8 __U, __m128d __A, __m128i __C)
+_mm_maskz_permutevar_pd(__mmask8 __U, __m128d __A, __m128i __C)
{
- return (__m128d) __builtin_ia32_vpermilvarpd_mask ((__v2df) __A,
- (__v2di) __C,
- (__v2df)
- _mm_setzero_pd (),
- (__mmask8) __U);
+ return (__m128d)__builtin_ia32_selectpd_128((__mmask8)__U,
+ (__v2df)_mm_permutevar_pd(__A, __C),
+ (__v2df)_mm_setzero_pd());
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_mask_permutevar_pd (__m256d __W, __mmask8 __U, __m256d __A,
- __m256i __C)
+_mm256_mask_permutevar_pd(__m256d __W, __mmask8 __U, __m256d __A, __m256i __C)
{
- return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
- (__v4di) __C,
- (__v4df) __W,
- (__mmask8)
- __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_permutevar_pd(__A, __C),
+ (__v4df)__W);
}
static __inline__ __m256d __DEFAULT_FN_ATTRS
-_mm256_maskz_permutevar_pd (__mmask8 __U, __m256d __A, __m256i __C)
+_mm256_maskz_permutevar_pd(__mmask8 __U, __m256d __A, __m256i __C)
{
- return (__m256d) __builtin_ia32_vpermilvarpd256_mask ((__v4df) __A,
- (__v4di) __C,
- (__v4df)
- _mm256_setzero_pd (),
- (__mmask8)
- __U);
+ return (__m256d)__builtin_ia32_selectpd_256((__mmask8)__U,
+ (__v4df)_mm256_permutevar_pd(__A, __C),
+ (__v4df)_mm256_setzero_pd());
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_mask_permutevar_ps (__m128 __W, __mmask8 __U, __m128 __A,
- __m128i __C)
+_mm_mask_permutevar_ps(__m128 __W, __mmask8 __U, __m128 __A, __m128i __C)
{
- return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
- (__v4si) __C,
- (__v4sf) __W,
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_permutevar_ps(__A, __C),
+ (__v4sf)__W);
}
static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_maskz_permutevar_ps (__mmask8 __U, __m128 __A, __m128i __C)
+_mm_maskz_permutevar_ps(__mmask8 __U, __m128 __A, __m128i __C)
{
- return (__m128) __builtin_ia32_vpermilvarps_mask ((__v4sf) __A,
- (__v4si) __C,
- (__v4sf)
- _mm_setzero_ps (),
- (__mmask8) __U);
+ return (__m128)__builtin_ia32_selectps_128((__mmask8)__U,
+ (__v4sf)_mm_permutevar_ps(__A, __C),
+ (__v4sf)_mm_setzero_ps());
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_mask_permutevar_ps (__m256 __W, __mmask8 __U, __m256 __A,
- __m256i __C)
+_mm256_mask_permutevar_ps(__m256 __W, __mmask8 __U, __m256 __A, __m256i __C)
{
- return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
- (__v8si) __C,
- (__v8sf) __W,
- (__mmask8) __U);
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_permutevar_ps(__A, __C),
+ (__v8sf)__W);
}
static __inline__ __m256 __DEFAULT_FN_ATTRS
-_mm256_maskz_permutevar_ps (__mmask8 __U, __m256 __A, __m256i __C)
+_mm256_maskz_permutevar_ps(__mmask8 __U, __m256 __A, __m256i __C)
{
- return (__m256) __builtin_ia32_vpermilvarps256_mask ((__v8sf) __A,
- (__v8si) __C,
- (__v8sf)
- _mm256_setzero_ps (),
- (__mmask8) __U);
+ return (__m256)__builtin_ia32_selectps_256((__mmask8)__U,
+ (__v8sf)_mm256_permutevar_ps(__A, __C),
+ (__v8sf)_mm256_setzero_ps());
}
static __inline__ __mmask8 __DEFAULT_FN_ATTRS
@@ -6985,154 +6738,156 @@ _mm256_maskz_unpacklo_epi64(__mmask8 __U, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sra_epi32 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sra_epi32(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sra_epi32(__A, __B),
+ (__v4si)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sra_epi32 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sra_epi32(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psrad128_mask ((__v4si) __A,
- (__v4si) __B,
- (__v4si)
- _mm_setzero_si128 (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_sra_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sra_epi32 (__m256i __W, __mmask8 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_sra_epi32(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
- (__v4si) __B,
- (__v8si) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sra_epi32(__A, __B),
+ (__v8si)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sra_epi32 (__mmask8 __U, __m256i __A, __m128i __B)
+_mm256_maskz_sra_epi32(__mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psrad256_mask ((__v8si) __A,
- (__v4si) __B,
- (__v8si)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_sra_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
}
-#define _mm_mask_srai_epi32(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psradi128_mask((__v4si)(__m128i)(A), (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srai_epi32(__A, __B),
+ (__v4si)__W);
+}
-#define _mm_maskz_srai_epi32(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psradi128_mask((__v4si)(__m128i)(A), (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, int __B)
+{
+ return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
+ (__v4si)_mm_srai_epi32(__A, __B),
+ (__v4si)_mm_setzero_si128());
+}
-#define _mm256_mask_srai_epi32(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psradi256_mask((__v8si)(__m256i)(A), (int)(imm), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srai_epi32(__A, __B),
+ (__v8si)__W);
+}
-#define _mm256_maskz_srai_epi32(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psradi256_mask((__v8si)(__m256i)(A), (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, int __B)
+{
+ return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
+ (__v8si)_mm256_srai_epi32(__A, __B),
+ (__v8si)_mm256_setzero_si256());
+}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi64 (__m128i __A, __m128i __B)
+_mm_sra_epi64(__m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) -1);
+ return (__m128i)__builtin_ia32_psraq128((__v2di)__A, (__v2di)__B);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mask_sra_epi64 (__m128i __W, __mmask8 __U, __m128i __A,
- __m128i __B)
+_mm_mask_sra_epi64(__m128i __W, __mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di) __W,
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sra_epi64(__A, __B),
+ (__v2di)__W);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_maskz_sra_epi64 (__mmask8 __U, __m128i __A, __m128i __B)
+_mm_maskz_sra_epi64(__mmask8 __U, __m128i __A, __m128i __B)
{
- return (__m128i) __builtin_ia32_psraq128_mask ((__v2di) __A,
- (__v2di) __B,
- (__v2di)
- _mm_setzero_di (),
- (__mmask8) __U);
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_sra_epi64(__A, __B),
+ (__v2di)_mm_setzero_di());
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_sra_epi64 (__m256i __A, __m128i __B)
+_mm256_sra_epi64(__m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
- (__v2di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) -1);
+ return (__m256i)__builtin_ia32_psraq256((__v4di)__A, (__v2di)__B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_mask_sra_epi64 (__m256i __W, __mmask8 __U, __m256i __A,
- __m128i __B)
+_mm256_mask_sra_epi64(__m256i __W, __mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
- (__v2di) __B,
- (__v4di) __W,
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sra_epi64(__A, __B),
+ (__v4di)__W);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
-_mm256_maskz_sra_epi64 (__mmask8 __U, __m256i __A, __m128i __B)
+_mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
{
- return (__m256i) __builtin_ia32_psraq256_mask ((__v4di) __A,
- (__v2di) __B,
- (__v4di)
- _mm256_setzero_si256 (),
- (__mmask8) __U);
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_sra_epi64(__A, __B),
+ (__v4di)_mm256_setzero_si256());
}
-#define _mm_srai_epi64(A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)-1); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_srai_epi64(__m128i __A, int __imm)
+{
+ return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
+}
-#define _mm_mask_srai_epi64(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_mask_srai_epi64(__m128i __W, __mmask8 __U, __m128i __A, int __imm)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srai_epi64(__A, __imm),
+ (__v2di)__W);
+}
-#define _mm_maskz_srai_epi64(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_psraqi128_mask((__v2di)(__m128i)(A), (int)(imm), \
- (__v2di)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, int __imm)
+{
+ return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
+ (__v2di)_mm_srai_epi64(__A, __imm),
+ (__v2di)_mm_setzero_di());
+}
-#define _mm256_srai_epi64(A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)-1); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_srai_epi64(__m256i __A, int __imm)
+{
+ return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
+}
-#define _mm256_mask_srai_epi64(W, U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_mask_srai_epi64(__m256i __W, __mmask8 __U, __m256i __A, int __imm)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srai_epi64(__A, __imm),
+ (__v4di)__W);
+}
-#define _mm256_maskz_srai_epi64(U, A, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_psraqi256_mask((__v4di)(__m256i)(A), (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+_mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, int __imm)
+{
+ return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
+ (__v4di)_mm256_srai_epi64(__A, __imm),
+ (__v4di)_mm256_setzero_si256());
+}
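Since AVX-512 adds a genuine 64-bit arithmetic shift, a short illustration of the sign-preserving behavior may help (values are illustrative; assumes AVX-512VL):

#include <immintrin.h>

/* Arithmetic right shift keeps the sign bit: lane 0 holds 8 and lane 1
   holds -8, so after >> 2 the lanes are 2 and -2 respectively. */
static __m128i srai64_demo(void)
{
  __m128i v = _mm_set_epi64x(-8, 8); /* lane 1 = -8, lane 0 = 8 */
  return _mm_srai_epi64(v, 2);       /* lane 1 = -2, lane 0 = 2 */
}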
#define _mm_ternarylogic_epi32(A, B, C, imm) __extension__ ({ \
(__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
@@ -8473,79 +8228,84 @@ _mm256_mask_cvtepi64_storeu_epi16 (void * __P, __mmask8 __M, __m256i __A)
}
#define _mm256_extractf32x4_ps(A, imm) __extension__ ({ \
- (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)-1); })
+ (__m128)__builtin_shufflevector((__v8sf)(__m256)(A), \
+ (__v8sf)_mm256_undefined_ps(), \
+ ((imm) & 1) ? 4 : 0, \
+ ((imm) & 1) ? 5 : 1, \
+ ((imm) & 1) ? 6 : 2, \
+ ((imm) & 1) ? 7 : 3); })
#define _mm256_mask_extractf32x4_ps(W, U, A, imm) __extension__ ({ \
- (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)(__m128)(W), \
- (__mmask8)(U)); })
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm256_extractf32x4_ps((A), (imm)), \
+ (__v4sf)(W)); })
#define _mm256_maskz_extractf32x4_ps(U, A, imm) __extension__ ({ \
- (__m128)__builtin_ia32_extractf32x4_256_mask((__v8sf)(__m256)(A), \
- (int)(imm), \
- (__v4sf)_mm_setzero_ps(), \
- (__mmask8)(U)); })
+ (__m128)__builtin_ia32_selectps_128((__mmask8)(U), \
+ (__v4sf)_mm256_extractf32x4_ps((A), (imm)), \
+ (__v4sf)_mm_setzero_ps()); })
#define _mm256_extracti32x4_epi32(A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)-1); })
+ (__m128i)__builtin_shufflevector((__v8si)(__m256i)(A), \
+ (__v8si)_mm256_undefined_si256(), \
+ ((imm) & 1) ? 4 : 0, \
+ ((imm) & 1) ? 5 : 1, \
+ ((imm) & 1) ? 6 : 2, \
+ ((imm) & 1) ? 7 : 3); })
#define _mm256_mask_extracti32x4_epi32(W, U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm256_extracti32x4_epi32((A), (imm)), \
+ (__v4si)(W)); })
#define _mm256_maskz_extracti32x4_epi32(U, A, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_extracti32x4_256_mask((__v8si)(__m256i)(A), \
- (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm256_extracti32x4_epi32((A), (imm)), \
+ (__v4si)_mm_setzero_si128()); })
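As the shuffle indices above show, the extract now reduces to picking one 128-bit half of the source. A sketch (helper name invented; assumes AVX-512VL):

#include <immintrin.h>

/* imm selects the half: 0 yields elements 0..3, 1 yields elements 4..7. */
static __m128i upper_half(__m256i v)
{
  return _mm256_extracti32x4_epi32(v, 1);
}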
#define _mm256_insertf32x4(A, B, imm) __extension__ ({ \
- (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
- (__v4sf)(__m128)(B), (int)(imm), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)-1); })
+ (__m256)__builtin_shufflevector((__v8sf)(A), \
+ (__v8sf)_mm256_castps128_ps256((__m128)(B)), \
+ ((imm) & 0x1) ? 0 : 8, \
+ ((imm) & 0x1) ? 1 : 9, \
+ ((imm) & 0x1) ? 2 : 10, \
+ ((imm) & 0x1) ? 3 : 11, \
+ ((imm) & 0x1) ? 8 : 4, \
+ ((imm) & 0x1) ? 9 : 5, \
+ ((imm) & 0x1) ? 10 : 6, \
+ ((imm) & 0x1) ? 11 : 7); })
#define _mm256_mask_insertf32x4(W, U, A, B, imm) __extension__ ({ \
- (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
- (__v4sf)(__m128)(B), (int)(imm), \
- (__v8sf)(__m256)(W), \
- (__mmask8)(U)); })
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
+ (__v8sf)(W)); })
#define _mm256_maskz_insertf32x4(U, A, B, imm) __extension__ ({ \
- (__m256)__builtin_ia32_insertf32x4_256_mask((__v8sf)(__m256)(A), \
- (__v4sf)(__m128)(B), (int)(imm), \
- (__v8sf)_mm256_setzero_ps(), \
- (__mmask8)(U)); })
+ (__m256)__builtin_ia32_selectps_256((__mmask8)(U), \
+ (__v8sf)_mm256_insertf32x4((A), (B), (imm)), \
+ (__v8sf)_mm256_setzero_ps()); })
#define _mm256_inserti32x4(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
- (__v4si)(__m128i)(B), \
- (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)-1); })
+ (__m256i)__builtin_shufflevector((__v8si)(A), \
+ (__v8si)_mm256_castsi128_si256((__m128i)(B)), \
+ ((imm) & 0x1) ? 0 : 8, \
+ ((imm) & 0x1) ? 1 : 9, \
+ ((imm) & 0x1) ? 2 : 10, \
+ ((imm) & 0x1) ? 3 : 11, \
+ ((imm) & 0x1) ? 8 : 4, \
+ ((imm) & 0x1) ? 9 : 5, \
+ ((imm) & 0x1) ? 10 : 6, \
+ ((imm) & 0x1) ? 11 : 7); })
#define _mm256_mask_inserti32x4(W, U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
- (__v4si)(__m128i)(B), \
- (int)(imm), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
+ (__v8si)(W)); })
#define _mm256_maskz_inserti32x4(U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_inserti32x4_256_mask((__v8si)(__m256i)(A), \
- (__v4si)(__m128i)(B), \
- (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_inserti32x4((A), (B), (imm)), \
+ (__v8si)_mm256_setzero_si256()); })
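The insert is the mirror image: the imm bit decides which 128-bit half of A is replaced by B. A sketch (helper name invented; assumes AVX-512VL):

#include <immintrin.h>

/* imm = 1 overwrites elements 4..7 of a with b; imm = 0 would overwrite
   elements 0..3 instead. */
static __m256i replace_upper(__m256i a, __m128i b)
{
  return _mm256_inserti32x4(a, b, 1);
}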
#define _mm_getmant_pd(A, B, C) __extension__({\
(__m128d)__builtin_ia32_getmantpd128_mask((__v2df)(__m128d)(A), \
@@ -8860,76 +8620,78 @@ _mm256_permutexvar_epi32 (__m256i __X, __m256i __Y)
}
#define _mm_alignr_epi32(A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (int)(imm), \
- (__v4si)_mm_undefined_si128(), \
- (__mmask8)-1); })
+ (__m128i)__builtin_shufflevector((__v4si)(__m128i)(B), \
+ (__v4si)(__m128i)(A), \
+ ((int)(imm) & 0x3) + 0, \
+ ((int)(imm) & 0x3) + 1, \
+ ((int)(imm) & 0x3) + 2, \
+ ((int)(imm) & 0x3) + 3); })
#define _mm_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (int)(imm), \
- (__v4si)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
+ (__v4si)(__m128i)(W)); })
#define _mm_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_alignd128_mask((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), (int)(imm), \
- (__v4si)_mm_setzero_si128(), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectd_128((__mmask8)(U), \
+ (__v4si)_mm_alignr_epi32((A), (B), (imm)), \
+ (__v4si)_mm_setzero_si128()); })
#define _mm256_alignr_epi32(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), (int)(imm), \
- (__v8si)_mm256_undefined_si256(), \
- (__mmask8)-1); })
+ (__m256i)__builtin_shufflevector((__v8si)(__m256i)(B), \
+ (__v8si)(__m256i)(A), \
+ ((int)(imm) & 0x7) + 0, \
+ ((int)(imm) & 0x7) + 1, \
+ ((int)(imm) & 0x7) + 2, \
+ ((int)(imm) & 0x7) + 3, \
+ ((int)(imm) & 0x7) + 4, \
+ ((int)(imm) & 0x7) + 5, \
+ ((int)(imm) & 0x7) + 6, \
+ ((int)(imm) & 0x7) + 7); })
#define _mm256_mask_alignr_epi32(W, U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), (int)(imm), \
- (__v8si)(__m256i)(W), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
+ (__v8si)(__m256i)(W)); })
#define _mm256_maskz_alignr_epi32(U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_alignd256_mask((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), (int)(imm), \
- (__v8si)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectd_256((__mmask8)(U), \
+ (__v8si)_mm256_alignr_epi32((A), (B), (imm)), \
+ (__v8si)_mm256_setzero_si256()); })
#define _mm_alignr_epi64(A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (int)(imm), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)-1); })
+ (__m128i)__builtin_shufflevector((__v2di)(__m128i)(B), \
+ (__v2di)(__m128i)(A), \
+ ((int)(imm) & 0x1) + 0, \
+ ((int)(imm) & 0x1) + 1); })
#define _mm_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (int)(imm), \
- (__v2di)(__m128i)(W), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
+ (__v2di)(__m128i)(W)); })
#define _mm_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
- (__m128i)__builtin_ia32_alignq128_mask((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), (int)(imm), \
- (__v2di)_mm_setzero_di(), \
- (__mmask8)(U)); })
+ (__m128i)__builtin_ia32_selectq_128((__mmask8)(U), \
+ (__v2di)_mm_alignr_epi64((A), (B), (imm)), \
+ (__v2di)_mm_setzero_di()); })
#define _mm256_alignr_epi64(A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), (int)(imm), \
- (__v4di)_mm256_undefined_pd(), \
- (__mmask8)-1); })
+ (__m256i)__builtin_shufflevector((__v4di)(__m256i)(B), \
+ (__v4di)(__m256i)(A), \
+ ((int)(imm) & 0x3) + 0, \
+ ((int)(imm) & 0x3) + 1, \
+ ((int)(imm) & 0x3) + 2, \
+ ((int)(imm) & 0x3) + 3); })
#define _mm256_mask_alignr_epi64(W, U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), (int)(imm), \
- (__v4di)(__m256i)(W), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
+ (__v4di)(__m256i)(W)); })
#define _mm256_maskz_alignr_epi64(U, A, B, imm) __extension__ ({ \
- (__m256i)__builtin_ia32_alignq256_mask((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), (int)(imm), \
- (__v4di)_mm256_setzero_si256(), \
- (__mmask8)(U)); })
+ (__m256i)__builtin_ia32_selectq_256((__mmask8)(U), \
+ (__v4di)_mm256_alignr_epi64((A), (B), (imm)), \
+ (__v4di)_mm256_setzero_si256()); })
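The alignr shuffles above treat {A:B} as one concatenated value (B in the low half) shifted right by imm elements. A sketch (helper name invented; assumes AVX-512VL):

#include <immintrin.h>

/* With imm = 1, lanes 0..2 of the result take B[1..3] and lane 3 takes
   A[0], i.e. the concatenation shifted right by one qword. */
static __m256i shift_pair(__m256i a, __m256i b)
{
  return _mm256_alignr_epi64(a, b, 1);
}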
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_mask_movehdup_ps (__m128 __W, __mmask8 __U, __m128 __A)
diff --git a/lib/Headers/avxintrin.h b/lib/Headers/avxintrin.h
index 32e8546817b3..be03ba346031 100644
--- a/lib/Headers/avxintrin.h
+++ b/lib/Headers/avxintrin.h
@@ -57,7 +57,7 @@ typedef long long __m256i __attribute__((__vector_size__(32)));
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDPD / ADDPD instruction.
+/// This intrinsic corresponds to the <c> VADDPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the source operands.
@@ -75,7 +75,7 @@ _mm256_add_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDPS / ADDPS instruction.
+/// This intrinsic corresponds to the <c> VADDPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the source operands.
@@ -93,7 +93,7 @@ _mm256_add_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSUBPD / SUBPD instruction.
+/// This intrinsic corresponds to the <c> VSUBPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing the minuend.
@@ -111,7 +111,7 @@ _mm256_sub_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSUBPS / SUBPS instruction.
+/// This intrinsic corresponds to the <c> VSUBPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing the minuend.
@@ -130,7 +130,7 @@ _mm256_sub_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDSUBPD / ADDSUBPD instruction.
+/// This intrinsic corresponds to the <c> VADDSUBPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing the left source operand.
@@ -149,7 +149,7 @@ _mm256_addsub_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDSUBPS / ADDSUBPS instruction.
+/// This intrinsic corresponds to the <c> VADDSUBPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing the left source operand.
@@ -167,7 +167,7 @@ _mm256_addsub_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VDIVPD / DIVPD instruction.
+/// This intrinsic corresponds to the <c> VDIVPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing the dividend.
@@ -185,7 +185,7 @@ _mm256_div_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VDIVPS / DIVPS instruction.
+/// This intrinsic corresponds to the <c> VDIVPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing the dividend.
@@ -204,7 +204,7 @@ _mm256_div_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMAXPD / MAXPD instruction.
+/// This intrinsic corresponds to the <c> VMAXPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the operands.
@@ -223,7 +223,7 @@ _mm256_max_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMAXPS / MAXPS instruction.
+/// This intrinsic corresponds to the <c> VMAXPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the operands.
@@ -242,7 +242,7 @@ _mm256_max_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMINPD / MINPD instruction.
+/// This intrinsic corresponds to the <c> VMINPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the operands.
@@ -261,7 +261,7 @@ _mm256_min_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMINPS / MINPS instruction.
+/// This intrinsic corresponds to the <c> VMINPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the operands.
@@ -279,7 +279,7 @@ _mm256_min_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMULPD / MULPD instruction.
+/// This intrinsic corresponds to the <c> VMULPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the operands.
@@ -297,7 +297,7 @@ _mm256_mul_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMULPS / MULPS instruction.
+/// This intrinsic corresponds to the <c> VMULPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the operands.
@@ -316,7 +316,7 @@ _mm256_mul_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSQRTPD / SQRTPD instruction.
+/// This intrinsic corresponds to the <c> VSQRTPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double].
@@ -333,7 +333,7 @@ _mm256_sqrt_pd(__m256d __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSQRTPS / SQRTPS instruction.
+/// This intrinsic corresponds to the <c> VSQRTPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float].
@@ -350,7 +350,7 @@ _mm256_sqrt_ps(__m256 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VRSQRTPS / RSQRTPS instruction.
+/// This intrinsic corresponds to the <c> VRSQRTPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float].
@@ -367,7 +367,7 @@ _mm256_rsqrt_ps(__m256 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VRCPPS / RCPPS instruction.
+/// This intrinsic corresponds to the <c> VRCPPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float].
@@ -389,24 +389,24 @@ _mm256_rcp_ps(__m256 __a)
/// __m256d _mm256_round_pd(__m256d V, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+/// This intrinsic corresponds to the <c> VROUNDPD </c> instruction.
///
/// \param V
/// A 256-bit vector of [4 x double].
/// \param M
-/// An integer value that specifies the rounding operation.
-/// Bits [7:4] are reserved.
-/// Bit [3] is a precision exception value:
-/// 0: A normal PE exception is used.
-/// 1: The PE field is not updated.
-/// Bit [2] is the rounding control source:
-/// 0: Use bits [1:0] of M.
-/// 1: Use the current MXCSR setting.
-/// Bits [1:0] contain the rounding control definition:
-/// 00: Nearest.
-/// 01: Downward (toward negative infinity).
-/// 10: Upward (toward positive infinity).
-/// 11: Truncated.
+/// An integer value that specifies the rounding operation. \n
+/// Bits [7:4] are reserved. \n
+/// Bit [3] is a precision exception value: \n
+/// 0: A normal PE exception is used. \n
+/// 1: The PE field is not updated. \n
+/// Bit [2] is the rounding control source: \n
+/// 0: Use bits [1:0] of \a M. \n
+/// 1: Use the current MXCSR setting. \n
+/// Bits [1:0] contain the rounding control definition: \n
+/// 00: Nearest. \n
+/// 01: Downward (toward negative infinity). \n
+/// 10: Upward (toward positive infinity). \n
+/// 11: Truncated.
/// \returns A 256-bit vector of [4 x double] containing the rounded values.
#define _mm256_round_pd(V, M) __extension__ ({ \
(__m256d)__builtin_ia32_roundpd256((__v4df)(__m256d)(V), (M)); })
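A worked example of the M encoding documented above (value chosen per the bit layout; assumes an AVX target with <immintrin.h>):

#include <immintrin.h>

/* M = 0x1 sets bits [1:0] = 01 (round downward) and bit [2] = 0 (use the
   immediate rather than MXCSR), which matches _mm256_floor_pd. */
static __m256d round_down(__m256d v)
{
  return _mm256_round_pd(v, 0x1);
}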
@@ -421,24 +421,24 @@ _mm256_rcp_ps(__m256 __a)
/// __m256 _mm256_round_ps(__m256 V, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+/// This intrinsic corresponds to the <c> VROUNDPS </c> instruction.
///
/// \param V
/// A 256-bit vector of [8 x float].
/// \param M
-/// An integer value that specifies the rounding operation.
-/// Bits [7:4] are reserved.
-/// Bit [3] is a precision exception value:
-/// 0: A normal PE exception is used.
-/// 1: The PE field is not updated.
-/// Bit [2] is the rounding control source:
-/// 0: Use bits [1:0] of M.
-/// 1: Use the current MXCSR setting.
-/// Bits [1:0] contain the rounding control definition:
-/// 00: Nearest.
-/// 01: Downward (toward negative infinity).
-/// 10: Upward (toward positive infinity).
-/// 11: Truncated.
+/// An integer value that specifies the rounding operation. \n
+/// Bits [7:4] are reserved. \n
+/// Bit [3] is a precision exception value: \n
+/// 0: A normal PE exception is used. \n
+/// 1: The PE field is not updated. \n
+/// Bit [2] is the rounding control source: \n
+/// 0: Use bits [1:0] of \a M. \n
+/// 1: Use the current MXCSR setting. \n
+/// Bits [1:0] contain the rounding control definition: \n
+/// 00: Nearest. \n
+/// 01: Downward (toward negative infinity). \n
+/// 10: Upward (toward positive infinity). \n
+/// 11: Truncated.
/// \returns A 256-bit vector of [8 x float] containing the rounded values.
#define _mm256_round_ps(V, M) __extension__ ({ \
(__m256)__builtin_ia32_roundps256((__v8sf)(__m256)(V), (M)); })
@@ -453,7 +453,7 @@ _mm256_rcp_ps(__m256 __a)
/// __m256d _mm256_ceil_pd(__m256d V);
/// \endcode
///
-/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+/// This intrinsic corresponds to the <c> VROUNDPD </c> instruction.
///
/// \param V
/// A 256-bit vector of [4 x double].
@@ -470,7 +470,7 @@ _mm256_rcp_ps(__m256 __a)
/// __m256d _mm256_floor_pd(__m256d V);
/// \endcode
///
-/// This intrinsic corresponds to the \c VROUNDPD / ROUNDPD instruction.
+/// This intrinsic corresponds to the <c> VROUNDPD </c> instruction.
///
/// \param V
/// A 256-bit vector of [4 x double].
@@ -488,7 +488,7 @@ _mm256_rcp_ps(__m256 __a)
/// __m256 _mm256_ceil_ps(__m256 V);
/// \endcode
///
-/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+/// This intrinsic corresponds to the <c> VROUNDPS </c> instruction.
///
/// \param V
/// A 256-bit vector of [8 x float].
@@ -505,7 +505,7 @@ _mm256_rcp_ps(__m256 __a)
/// __m256 _mm256_floor_ps(__m256 V);
/// \endcode
///
-/// This intrinsic corresponds to the \c VROUNDPS / ROUNDPS instruction.
+/// This intrinsic corresponds to the <c> VROUNDPS </c> instruction.
///
/// \param V
/// A 256-bit vector of [8 x float].
@@ -517,7 +517,7 @@ _mm256_rcp_ps(__m256 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VANDPD / ANDPD instruction.
+/// This intrinsic corresponds to the <c> VANDPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the source operands.
@@ -535,7 +535,7 @@ _mm256_and_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VANDPS / ANDPS instruction.
+/// This intrinsic corresponds to the <c> VANDPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the source operands.
@@ -554,7 +554,7 @@ _mm256_and_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VANDNPD / ANDNPD instruction.
+/// This intrinsic corresponds to the <c> VANDNPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing the left source operand. The
@@ -575,7 +575,7 @@ _mm256_andnot_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VANDNPS / ANDNPS instruction.
+/// This intrinsic corresponds to the <c> VANDNPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing the left source operand. The
@@ -595,7 +595,7 @@ _mm256_andnot_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VORPD / ORPD instruction.
+/// This intrinsic corresponds to the <c> VORPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the source operands.
@@ -613,7 +613,7 @@ _mm256_or_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VORPS / ORPS instruction.
+/// This intrinsic corresponds to the <c> VORPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the source operands.
@@ -631,7 +631,7 @@ _mm256_or_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VXORPD / XORPD instruction.
+/// This intrinsic corresponds to the <c> VXORPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the source operands.
@@ -649,7 +649,7 @@ _mm256_xor_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the source operands.
@@ -669,7 +669,7 @@ _mm256_xor_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHADDPD / HADDPD instruction.
+/// This intrinsic corresponds to the <c> VHADDPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the source operands.
@@ -692,7 +692,7 @@ _mm256_hadd_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHADDPS / HADDPS instruction.
+/// This intrinsic corresponds to the <c> VHADDPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the source operands.
@@ -715,7 +715,7 @@ _mm256_hadd_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHSUBPD / HSUBPD instruction.
+/// This intrinsic corresponds to the <c> VHSUBPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double] containing one of the source operands.
@@ -738,7 +738,7 @@ _mm256_hsub_pd(__m256d __a, __m256d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHSUBPS / HSUBPS instruction.
+/// This intrinsic corresponds to the <c> VHSUBPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float] containing one of the source operands.
@@ -762,23 +762,23 @@ _mm256_hsub_ps(__m256 __a, __m256 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double].
/// \param __c
/// A 128-bit integer vector operand specifying how the values are to be
-/// copied.
-/// Bit [1]:
-/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// Bit [65]:
-/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
-/// returned vector.
+/// copied. \n
+/// Bit [1]: \n
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned
+/// vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector. \n
+/// Bit [65]: \n
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_permutevar_pd(__m128d __a, __m128i __c)
@@ -786,37 +786,37 @@ _mm_permutevar_pd(__m128d __a, __m128i __c)
return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)__a, (__v2di)__c);
}
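A worked example of the control-bit layout documented above (values illustrative; assumes an AVX target):

#include <immintrin.h>

/* Control qwords {element0 = 2, element1 = 0}: bit [1] = 1 routes the high
   double to lane 0 and bit [65] = 0 routes the low double to lane 1, so
   the two lanes swap. */
static __m128d swap_lanes(__m128d v)
{
  return _mm_permutevar_pd(v, _mm_set_epi64x(0, 2));
}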
-/// \brief Copies the values in a 256-bit vector of [4 x double] as
-/// specified by the 256-bit integer vector operand.
+/// \brief Copies the values in a 256-bit vector of [4 x double] as specified
+/// by the 256-bit integer vector operand.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double].
/// \param __c
/// A 256-bit integer vector operand specifying how the values are to be
-/// copied.
-/// Bit [1]:
-/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// Bit [65]:
-/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
-/// returned vector.
-/// Bit [129]:
-/// 0: Bits [191:128] of the source are copied to bits [191:128] of the
-/// returned vector.
-/// 1: Bits [255:192] of the source are copied to bits [191:128] of the
-/// returned vector.
-/// Bit [193]:
-/// 0: Bits [191:128] of the source are copied to bits [255:192] of the
-/// returned vector.
-/// 1: Bits [255:192] of the source are copied to bits [255:192] of the
+/// copied. \n
+/// Bit [1]: \n
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned
+/// vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector. \n
+/// Bit [65]: \n
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector. \n
+/// Bit [129]: \n
+/// 0: Bits [191:128] of the source are copied to bits [191:128] of the
+/// returned vector. \n
+/// 1: Bits [255:192] of the source are copied to bits [191:128] of the
+/// returned vector. \n
+/// Bit [193]: \n
+/// 0: Bits [191:128] of the source are copied to bits [255:192] of the
+/// returned vector. \n
+/// 1: Bits [255:192] of the source are copied to bits [255:192] of the
/// returned vector.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
static __inline __m256d __DEFAULT_FN_ATTRS
@@ -827,52 +827,51 @@ _mm256_permutevar_pd(__m256d __a, __m256i __c)
/// \brief Copies the values stored in a 128-bit vector of [4 x float] as
/// specified by the 128-bit integer vector operand.
-///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
/// \param __c
/// A 128-bit integer vector operand specifying how the values are to be
-/// copied.
-/// Bits [1:0]:
-/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// Bits [33:32]:
-/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// Bits [65:64]:
-/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// Bits [97:96]:
-/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
-/// returned vector.
+/// copied. \n
+/// Bits [1:0]: \n
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// Bits [33:32]: \n
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// Bits [65:64]: \n
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// Bits [97:96]: \n
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
static __inline __m128 __DEFAULT_FN_ATTRS
_mm_permutevar_ps(__m128 __a, __m128i __c)
@@ -885,85 +884,85 @@ _mm_permutevar_ps(__m128 __a, __m128i __c)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float].
/// \param __c
/// A 256-bit integer vector operand specifying how the values are to be
-/// copied.
-/// Bits [1:0]:
-/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// Bits [33:32]:
-/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// Bits [65:64]:
-/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// Bits [97:96]:
-/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// Bits [129:128]:
-/// 00: Bits [159:128] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// Bits [161:160]:
-/// 00: Bits [159:128] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// Bits [193:192]:
-/// 00: Bits [159:128] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// Bits [225:224]:
-/// 00: Bits [159:128] of the source are copied to bits [255:224] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [255:224] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [255:224] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
-/// returned vector.
+/// copied. \n
+/// Bits [1:0]: \n
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// Bits [33:32]: \n
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// Bits [65:64]: \n
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// Bits [97:96]: \n
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// Bits [129:128]: \n
+/// 00: Bits [159:128] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// Bits [161:160]: \n
+/// 00: Bits [159:128] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// Bits [193:192]: \n
+/// 00: Bits [159:128] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// Bits [225:224]: \n
+/// 00: Bits [159:128] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
+/// returned vector.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_permutevar_ps(__m256 __a, __m256i __c)
@@ -971,8 +970,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)__a, (__v8si)__c);
}
-/// \brief Copies the values in a 128-bit vector of [2 x double] as
-/// specified by the immediate integer operand.
+/// \brief Copies the values in a 128-bit vector of [2 x double] as specified
+/// by the immediate integer operand.
///
/// \headerfile <x86intrin.h>
///
@@ -980,30 +979,31 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m128d _mm_permute_pd(__m128d A, const int C);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
///
/// \param A
/// A 128-bit vector of [2 x double].
/// \param C
-/// An immediate integer operand specifying how the values are to be copied.
-/// Bit [0]:
-/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// Bit [1]:
-/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
-/// returned vector.
+/// An immediate integer operand specifying how the values are to be
+/// copied. \n
+/// Bit [0]: \n
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned
+/// vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector. \n
+/// Bit [1]: \n
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
#define _mm_permute_pd(A, C) __extension__ ({ \
(__m128d)__builtin_shufflevector((__v2df)(__m128d)(A), \
(__v2df)_mm_undefined_pd(), \
((C) >> 0) & 0x1, ((C) >> 1) & 0x1); })
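The immediate form encodes the same selection in the low bits of C; a sketch equivalent to the variable-control swap shown earlier:

#include <immintrin.h>

/* C = 0x1: bit [0] = 1 copies the high element to lane 0 and bit [1] = 0
   copies the low element to lane 1, swapping the vector's two lanes. */
static __m128d swap_lanes_imm(__m128d v)
{
  return _mm_permute_pd(v, 0x1);
}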
-/// \brief Copies the values in a 256-bit vector of [4 x double] as
-/// specified by the immediate integer operand.
+/// \brief Copies the values in a 256-bit vector of [4 x double] as specified by
+/// the immediate integer operand.
///
/// \headerfile <x86intrin.h>
///
@@ -1011,32 +1011,33 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m256d _mm256_permute_pd(__m256d A, const int C);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPERMILPD / PERMILPD instruction.
+/// This intrinsic corresponds to the <c> VPERMILPD </c> instruction.
///
/// \param A
/// A 256-bit vector of [4 x double].
/// \param C
-/// An immediate integer operand specifying how the values are to be copied.
-/// Bit [0]:
-/// 0: Bits [63:0] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
-/// returned vector.
-/// Bit [1]:
-/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
-/// returned vector.
-/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
-/// returned vector.
-/// Bit [2]:
-/// 0: Bits [191:128] of the source are copied to bits [191:128] of the
-/// returned vector.
-/// 1: Bits [255:192] of the source are copied to bits [191:128] of the
-/// returned vector.
-/// Bit [3]:
-/// 0: Bits [191:128] of the source are copied to bits [255:192] of the
-/// returned vector.
-/// 1: Bits [255:192] of the source are copied to bits [255:192] of the
-/// returned vector.
+/// An immediate integer operand specifying how the values are to be
+/// copied. \n
+/// Bit [0]: \n
+/// 0: Bits [63:0] of the source are copied to bits [63:0] of the returned
+/// vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [63:0] of the
+/// returned vector. \n
+/// Bit [1]: \n
+/// 0: Bits [63:0] of the source are copied to bits [127:64] of the
+/// returned vector. \n
+/// 1: Bits [127:64] of the source are copied to bits [127:64] of the
+/// returned vector. \n
+/// Bit [2]: \n
+/// 0: Bits [191:128] of the source are copied to bits [191:128] of the
+/// returned vector. \n
+/// 1: Bits [255:192] of the source are copied to bits [191:128] of the
+/// returned vector. \n
+/// Bit [3]: \n
+/// 0: Bits [191:128] of the source are copied to bits [255:192] of the
+/// returned vector. \n
+/// 1: Bits [255:192] of the source are copied to bits [255:192] of the
+/// returned vector.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
#define _mm256_permute_pd(A, C) __extension__ ({ \
(__m256d)__builtin_shufflevector((__v4df)(__m256d)(A), \
@@ -1046,8 +1047,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
2 + (((C) >> 2) & 0x1), \
2 + (((C) >> 3) & 0x1)); })
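Because each control bit selects only within its own 128-bit lane, an immediate of 0x5 (bits 0 and 2 set) swaps the pair of doubles in both lanes at once. A sketch under the same assumptions as above:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d v = _mm256_set_pd(4.0, 3.0, 2.0, 1.0); /* elements 0..3 = 1,2,3,4 */
      __m256d r = _mm256_permute_pd(v, 0x5);         /* swap within each lane */
      double out[4];
      _mm256_storeu_pd(out, r);
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 2 1 4 3 */
      return 0;
    }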
-/// \brief Copies the values in a 128-bit vector of [4 x float] as
-/// specified by the immediate integer operand.
+/// \brief Copies the values in a 128-bit vector of [4 x float] as specified by
+/// the immediate integer operand.
///
/// \headerfile <x86intrin.h>
///
@@ -1055,48 +1056,49 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m128 _mm_permute_ps(__m128 A, const int C);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
///
/// \param A
/// A 128-bit vector of [4 x float].
/// \param C
-/// An immediate integer operand specifying how the values are to be copied.
-/// Bits [1:0]:
-/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// Bits [3:2]:
-/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// Bits [5:4]:
-/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// Bits [7:6]:
-/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
-/// returned vector.
+/// An immediate integer operand specifying how the values are to be
+/// copied. \n
+/// Bits [1:0]: \n
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// Bits [3:2]: \n
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// Bits [5:4]: \n
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// Bits [7:6]: \n
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
#define _mm_permute_ps(A, C) __extension__ ({ \
(__m128)__builtin_shufflevector((__v4sf)(__m128)(A), \
@@ -1104,8 +1106,8 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
((C) >> 0) & 0x3, ((C) >> 2) & 0x3, \
((C) >> 4) & 0x3, ((C) >> 6) & 0x3); })
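The four 2-bit fields compose like the classic shuffle immediates; for example, 0x1B encodes the fields 3, 2, 1, 0 and so reverses the vector. A sketch, again assuming an AVX-enabled build:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f); /* elements 0..3 = 1,2,3,4 */
      __m128 r = _mm_permute_ps(v, 0x1B);            /* fields 3,2,1,0: reverse */
      float out[4];
      _mm_storeu_ps(out, r);
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 4 3 2 1 */
      return 0;
    }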
-/// \brief Copies the values in a 256-bit vector of [8 x float] as
-/// specified by the immediate integer operand.
+/// \brief Copies the values in a 256-bit vector of [8 x float] as specified by
+/// the immediate integer operand.
///
/// \headerfile <x86intrin.h>
///
@@ -1113,84 +1115,85 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m256 _mm256_permute_ps(__m256 A, const int C);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+/// This intrinsic corresponds to the <c> VPERMILPS </c> instruction.
///
/// \param A
/// A 256-bit vector of [8 x float].
/// \param C
-/// An immediate integer operand specifying how the values are to be copied.
-/// Bits [1:0]:
-/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
-/// returned vector.
-/// Bits [3:2]:
-/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
-/// returned vector.
-/// Bits [5:4]:
-/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
-/// returned vector.
-/// Bits [7:6]:
-/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
-/// returned vector.
-/// Bits [1:0]:
-/// 00: Bits [159:128] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [159:128] of the
-/// returned vector.
-/// Bits [3:2]:
-/// 00: Bits [159:128] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [191:160] of the
-/// returned vector.
-/// Bits [5:4]:
-/// 00: Bits [159:128] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [223:192] of the
-/// returned vector.
-/// Bits [7:6]:
-/// 00: Bits [159:128] of the source are copied to bits [255:224] of the
-/// returned vector.
-/// 01: Bits [191:160] of the source are copied to bits [255:224] of the
-/// returned vector.
-/// 10: Bits [223:192] of the source are copied to bits [255:224] of the
-/// returned vector.
-/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
-/// returned vector.
+/// An immediate integer operand specifying how the values are to be
+/// copied. \n
+/// Bits [1:0]: \n
+/// 00: Bits [31:0] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [31:0] of the
+/// returned vector. \n
+/// Bits [3:2]: \n
+/// 00: Bits [31:0] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [63:32] of the
+/// returned vector. \n
+/// Bits [5:4]: \n
+/// 00: Bits [31:0] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [95:64] of the
+/// returned vector. \n
+/// Bits [7:6]: \n
+/// 00: Bits [31:0] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 01: Bits [63:32] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 10: Bits [95:64] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// 11: Bits [127:96] of the source are copied to bits [127:96] of the
+/// returned vector. \n
+/// Bits [1:0]: \n
+/// 00: Bits [159:128] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [159:128] of the
+/// returned vector. \n
+/// Bits [3:2]: \n
+/// 00: Bits [159:128] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [191:160] of the
+/// returned vector. \n
+/// Bits [5:4]: \n
+/// 00: Bits [159:128] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [223:192] of the
+/// returned vector. \n
+/// Bits [7:6]: \n
+/// 00: Bits [159:128] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 01: Bits [191:160] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 10: Bits [223:192] of the source are copied to bits [255:224] of the
+/// returned vector. \n
+/// 11: Bits [255:224] of the source are copied to bits [255:224] of the
+/// returned vector.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
#define _mm256_permute_ps(A, C) __extension__ ({ \
(__m256)__builtin_shufflevector((__v8sf)(__m256)(A), \
@@ -1213,7 +1216,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m256d _mm256_permute2f128_pd(__m256d V1, __m256d V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+/// This intrinsic corresponds to the <c> VPERM2F128 </c> instruction.
///
/// \param V1
/// A 256-bit vector of [4 x double].
@@ -1221,25 +1224,25 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// A 256-bit vector of [4 x double].
/// \param M
/// An immediate integer operand specifying how the values are to be
-/// permuted.
-/// Bits [1:0]:
-/// 00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
-/// destination.
-/// 01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
-/// destination.
-/// 10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
-/// destination.
-/// 11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
-/// destination.
-/// Bits [5:4]:
-/// 00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
-/// destination.
-/// 01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
-/// destination.
-/// 10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
-/// destination.
-/// 11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
-/// destination.
+/// permuted. \n
+/// Bits [1:0]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// Bits [5:4]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
+/// destination.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
(__m256d)__builtin_ia32_vperm2f128_pd256((__v4df)(__m256d)(V1), \
@@ -1254,7 +1257,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m256 _mm256_permute2f128_ps(__m256 V1, __m256 V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+/// This intrinsic corresponds to the <c> VPERM2F128 </c> instruction.
///
/// \param V1
/// A 256-bit vector of [8 x float].
@@ -1262,24 +1265,24 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// A 256-bit vector of [8 x float].
/// \param M
/// An immediate integer operand specifying how the values are to be
-/// permuted.
-/// Bits [1:0]:
-/// 00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
-/// destination.
-/// 01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
-/// destination.
-/// 10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
-/// destination.
-/// 11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
-/// destination.
-/// Bits [5:4]:
-/// 00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
-/// destination.
-/// 01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
-/// destination.
-/// 10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
-/// destination.
-/// 11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+/// permuted. \n
+/// Bits [1:0]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// Bits [5:4]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
/// destination.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
@@ -1295,7 +1298,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m256i _mm256_permute2f128_si256(__m256i V1, __m256i V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPERM2F128 / PERM2F128 instruction.
+/// This intrinsic corresponds to the <c> VPERM2F128 </c> instruction.
///
/// \param V1
/// A 256-bit integer vector.
@@ -1303,23 +1306,23 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// A 256-bit integer vector.
/// \param M
/// An immediate integer operand specifying how the values are to be copied.
-/// Bits [1:0]:
-/// 00: Bits [127:0] of operand V1 are copied to bits [127:0] of the
-/// destination.
-/// 01: Bits [255:128] of operand V1 are copied to bits [127:0] of the
-/// destination.
-/// 10: Bits [127:0] of operand V2 are copied to bits [127:0] of the
-/// destination.
-/// 11: Bits [255:128] of operand V2 are copied to bits [127:0] of the
-/// destination.
-/// Bits [5:4]:
-/// 00: Bits [127:0] of operand V1 are copied to bits [255:128] of the
-/// destination.
-/// 01: Bits [255:128] of operand V1 are copied to bits [255:128] of the
-/// destination.
-/// 10: Bits [127:0] of operand V2 are copied to bits [255:128] of the
-/// destination.
-/// 11: Bits [255:128] of operand V2 are copied to bits [255:128] of the
+/// Bits [1:0]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [127:0] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [127:0] of the
+/// destination. \n
+/// Bits [5:4]: \n
+/// 00: Bits [127:0] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 01: Bits [255:128] of operand \a V1 are copied to bits [255:128] of the
+/// destination. \n
+/// 10: Bits [127:0] of operand \a V2 are copied to bits [255:128] of the
+/// destination. \n
+/// 11: Bits [255:128] of operand \a V2 are copied to bits [255:128] of the
/// destination.
/// \returns A 256-bit integer vector containing the copied values.
#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
@@ -1337,7 +1340,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m256d _mm256_blend_pd(__m256d V1, __m256d V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VBLENDPD / BLENDPD instruction.
+/// This intrinsic corresponds to the <c> VBLENDPD </c> instruction.
///
/// \param V1
/// A 256-bit vector of [4 x double].
@@ -1347,9 +1350,9 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// An immediate integer operand, with mask bits [3:0] specifying how the
/// values are to be copied. The position of the mask bit corresponds to the
/// index of a copied value. When a mask bit is 0, the corresponding 64-bit
-/// element in operand V1 is copied to the same position in the destination.
-/// When a mask bit is 1, the corresponding 64-bit element in operand V2 is
-/// copied to the same position in the destination.
+/// element in operand \a V1 is copied to the same position in the
+/// destination. When a mask bit is 1, the corresponding 64-bit element in
+/// operand \a V2 is copied to the same position in the destination.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
(__m256d)__builtin_shufflevector((__v4df)(__m256d)(V1), \
@@ -1369,7 +1372,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// __m256 _mm256_blend_ps(__m256 V1, __m256 V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VBLENDPS / BLENDPS instruction.
+/// This intrinsic corresponds to the <c> VBLENDPS </c> instruction.
///
/// \param V1
/// A 256-bit vector of [8 x float].
@@ -1379,9 +1382,9 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// An immediate integer operand, with mask bits [7:0] specifying how the
/// values are to be copied. The position of the mask bit corresponds to the
/// index of a copied value. When a mask bit is 0, the corresponding 32-bit
-/// element in operand V1 is copied to the same position in the destination.
-/// When a mask bit is 1, the corresponding 32-bit element in operand V2 is
-/// copied to the same position in the destination.
+/// element in operand \a V1 is copied to the same position in the
+/// destination. When a mask bit is 1, the corresponding 32-bit element in
+/// operand \a V2 is copied to the same position in the destination.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
(__m256)__builtin_shufflevector((__v8sf)(__m256)(V1), \
@@ -1401,7 +1404,7 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VBLENDVPD / BLENDVPD instruction.
+/// This intrinsic corresponds to the <c> VBLENDVPD </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double].
@@ -1411,9 +1414,9 @@ _mm256_permutevar_ps(__m256 __a, __m256i __c)
/// A 256-bit vector operand, with mask bits 255, 191, 127, and 63 specifying
/// how the values are to be copied. The position of the mask bit corresponds
/// to the most significant bit of a copied value. When a mask bit is 0, the
-/// corresponding 64-bit element in operand __a is copied to the same
+/// corresponding 64-bit element in operand \a __a is copied to the same
/// position in the destination. When a mask bit is 1, the corresponding
-/// 64-bit element in operand __b is copied to the same position in the
+/// 64-bit element in operand \a __b is copied to the same position in the
/// destination.
/// \returns A 256-bit vector of [4 x double] containing the copied values.
static __inline __m256d __DEFAULT_FN_ATTRS
@@ -1429,7 +1432,7 @@ _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VBLENDVPS / BLENDVPS instruction.
+/// This intrinsic corresponds to the <c> VBLENDVPS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float].
@@ -1439,9 +1442,9 @@ _mm256_blendv_pd(__m256d __a, __m256d __b, __m256d __c)
/// A 256-bit vector operand, with mask bits 255, 223, 191, 159, 127, 95, 63,
/// and 31 specifying how the values are to be copied. The position of the
/// mask bit corresponds to the most significant bit of a copied value. When
-/// a mask bit is 0, the corresponding 32-bit element in operand __a is
+/// a mask bit is 0, the corresponding 32-bit element in operand \a __a is
/// copied to the same position in the destination. When a mask bit is 1, the
-/// corresponding 32-bit element in operand __b is copied to the same
+/// corresponding 32-bit element in operand \a __b is copied to the same
/// position in the destination.
/// \returns A 256-bit vector of [8 x float] containing the copied values.
static __inline __m256 __DEFAULT_FN_ATTRS
@@ -1455,12 +1458,12 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// \brief Computes two dot products in parallel, using the lower and upper
/// halves of two [8 x float] vectors as input to the two computations, and
/// returning the two dot products in the lower and upper halves of the
-/// [8 x float] result. The immediate integer operand controls which
-/// input elements will contribute to the dot product, and where the final
-/// results are returned. In general, for each dot product, the four
-/// corresponding elements of the input vectors are multiplied; the first
-/// two and second two products are summed, then the two sums are added to
-/// form the final result.
+/// [8 x float] result. The immediate integer operand controls which input
+/// elements will contribute to the dot product, and where the final results
+/// are returned. In general, for each dot product, the four corresponding
+/// elements of the input vectors are multiplied; the first two and second
+/// two products are summed, then the two sums are added to form the final
+/// result.
///
/// \headerfile <x86intrin.h>
///
@@ -1468,7 +1471,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m256 _mm256_dp_ps(__m256 V1, __m256 V2, const int M);
/// \endcode
///
-/// This intrinsic corresponds to the \c VDPPS / DPPS instruction.
+/// This intrinsic corresponds to the <c> VDPPS </c> instruction.
///
/// \param V1
/// A vector of [8 x float] values, treated as two [4 x float] vectors.
@@ -1510,7 +1513,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m256 _mm256_shuffle_ps(__m256 a, __m256 b, const int mask);
/// \endcode
///
-/// This intrinsic corresponds to the \c VSHUFPS / SHUFPS instruction.
+/// This intrinsic corresponds to the <c> VSHUFPS </c> instruction.
///
/// \param a
/// A 256-bit vector of [8 x float]. The four selected elements in this
@@ -1522,22 +1525,23 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// destination, according to the bits specified in the immediate operand.
/// \param mask
/// An immediate value containing an 8-bit value specifying which elements to
-/// copy from a and b. Bits [3:0] specify the values copied from operand a.
-/// Bits [7:4] specify the values copied from operand b.
+/// copy from \a a and \a b. \n
+/// Bits [3:0] specify the values copied from operand \a a. \n
+/// Bits [7:4] specify the values copied from operand \a b. \n
/// The destinations within the 256-bit destination are assigned values as
-/// follows, according to the bit value assignments described below:
+/// follows, according to the bit value assignments described below: \n
/// Bits [1:0] are used to assign values to bits [31:0] and [159:128] in the
-/// destination.
+/// destination. \n
/// Bits [3:2] are used to assign values to bits [63:32] and [191:160] in the
-/// destination.
+/// destination. \n
/// Bits [5:4] are used to assign values to bits [95:64] and [223:192] in the
-/// destination.
+/// destination. \n
/// Bits [7:6] are used to assign values to bits [127:96] and [255:224] in
-/// the destination.
-/// Bit value assignments:
-/// 00: Bits [31:0] and [159:128] are copied from the selected operand.
-/// 01: Bits [63:32] and [191:160] are copied from the selected operand.
-/// 10: Bits [95:64] and [223:192] are copied from the selected operand.
+/// the destination. \n
+/// Bit value assignments: \n
+/// 00: Bits [31:0] and [159:128] are copied from the selected operand. \n
+/// 01: Bits [63:32] and [191:160] are copied from the selected operand. \n
+/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \n
/// 11: Bits [127:96] and [255:224] are copied from the selected operand.
/// \returns A 256-bit vector of [8 x float] containing the shuffled values.
#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
@@ -1567,7 +1571,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m256d _mm256_shuffle_pd(__m256d a, __m256d b, const int mask);
/// \endcode
///
-/// This intrinsic corresponds to the \c VSHUFPD / SHUFPD instruction.
+/// This intrinsic corresponds to the <c> VSHUFPD </c> instruction.
///
/// \param a
/// A 256-bit vector of [4 x double].
@@ -1575,22 +1579,22 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// A 256-bit vector of [4 x double].
/// \param mask
/// An immediate value containing 8-bit values specifying which elements to
-/// copy from a and b:
-/// Bit [0]=0: Bits [63:0] are copied from a to bits [63:0] of the
-/// destination.
-/// Bit [0]=1: Bits [127:64] are copied from a to bits [63:0] of the
-/// destination.
-/// Bit [1]=0: Bits [63:0] are copied from b to bits [127:64] of the
-/// destination.
-/// Bit [1]=1: Bits [127:64] are copied from b to bits [127:64] of the
-/// destination.
-/// Bit [2]=0: Bits [191:128] are copied from a to bits [191:128] of the
-/// destination.
-/// Bit [2]=1: Bits [255:192] are copied from a to bits [191:128] of the
-/// destination.
-/// Bit [3]=0: Bits [191:128] are copied from b to bits [255:192] of the
-/// destination.
-/// Bit [3]=1: Bits [255:192] are copied from b to bits [255:192] of the
+/// copy from \a a and \a b: \n
+/// Bit [0]=0: Bits [63:0] are copied from \a a to bits [63:0] of the
+/// destination. \n
+/// Bit [0]=1: Bits [127:64] are copied from \a a to bits [63:0] of the
+/// destination. \n
+/// Bit [1]=0: Bits [63:0] are copied from \a b to bits [127:64] of the
+/// destination. \n
+/// Bit [1]=1: Bits [127:64] are copied from \a b to bits [127:64] of the
+/// destination. \n
+/// Bit [2]=0: Bits [191:128] are copied from \a a to bits [191:128] of the
+/// destination. \n
+/// Bit [2]=1: Bits [255:192] are copied from \a a to bits [191:128] of the
+/// destination. \n
+/// Bit [3]=0: Bits [191:128] are copied from \a b to bits [255:192] of the
+/// destination. \n
+/// Bit [3]=1: Bits [255:192] are copied from \a b to bits [255:192] of the
/// destination.
/// \returns A 256-bit vector of [4 x double] containing the shuffled values.
#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
@@ -1647,7 +1651,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m128d _mm_cmp_pd(__m128d a, __m128d b, const int c);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCMPPD / CMPPD instruction.
+/// This intrinsic corresponds to the <c> VCMPPD </c> instruction.
///
/// \param a
/// A 128-bit vector of [2 x double].
@@ -1655,16 +1659,17 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// A 128-bit vector of [2 x double].
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
-/// operation to use:
-/// 00h, 08h, 10h, 18h: Equal
-/// 01h, 09h, 11h, 19h: Less than
-/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
-/// operands)
-/// 03h, 0Bh, 13h, 1Bh: Unordered
-/// 04h, 0Ch, 14h, 1Ch: Not equal
-/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// operation to use: \n
+/// 00h, 08h, 10h, 18h: Equal \n
+/// 01h, 09h, 11h, 19h: Less than \n
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal
+/// (swapped operands) \n
+/// 03h, 0Bh, 13h, 1Bh: Unordered \n
+/// 04h, 0Ch, 14h, 1Ch: Not equal \n
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than
+/// (swapped operands) \n
/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
-/// (swapped operands)
+/// (swapped operands) \n
/// 07h, 0Fh, 17h, 1Fh: Ordered
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
#define _mm_cmp_pd(a, b, c) __extension__ ({ \
@@ -1683,7 +1688,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m128 _mm_cmp_ps(__m128 a, __m128 b, const int c);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCMPPS / CMPPS instruction.
+/// This intrinsic corresponds to the <c> VCMPPS </c> instruction.
///
/// \param a
/// A 128-bit vector of [4 x float].
@@ -1691,16 +1696,17 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// A 128-bit vector of [4 x float].
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
-/// operation to use:
-/// 00h, 08h, 10h, 18h: Equal
-/// 01h, 09h, 11h, 19h: Less than
-/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
-/// operands)
-/// 03h, 0Bh, 13h, 1Bh: Unordered
-/// 04h, 0Ch, 14h, 1Ch: Not equal
-/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// operation to use: \n
+/// 00h, 08h, 10h, 18h: Equal \n
+/// 01h, 09h, 11h, 19h: Less than \n
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal
+/// (swapped operands) \n
+/// 03h, 0Bh, 13h, 1Bh: Unordered \n
+/// 04h, 0Ch, 14h, 1Ch: Not equal \n
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than
+/// (swapped operands) \n
/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
-/// (swapped operands)
+/// (swapped operands) \n
/// 07h, 0Fh, 17h, 1Fh: Ordered
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
#define _mm_cmp_ps(a, b, c) __extension__ ({ \
@@ -1719,7 +1725,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m256d _mm256_cmp_pd(__m256d a, __m256d b, const int c);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCMPPD / CMPPD instruction.
+/// This intrinsic corresponds to the <c> VCMPPD </c> instruction.
///
/// \param a
/// A 256-bit vector of [4 x double].
@@ -1727,16 +1733,17 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// A 256-bit vector of [4 x double].
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
-/// operation to use:
-/// 00h, 08h, 10h, 18h: Equal
-/// 01h, 09h, 11h, 19h: Less than
-/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
-/// operands)
-/// 03h, 0Bh, 13h, 1Bh: Unordered
-/// 04h, 0Ch, 14h, 1Ch: Not equal
-/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// operation to use: \n
+/// 00h, 08h, 10h, 18h: Equal \n
+/// 01h, 09h, 11h, 19h: Less than \n
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal
+/// (swapped operands) \n
+/// 03h, 0Bh, 13h, 1Bh: Unordered \n
+/// 04h, 0Ch, 14h, 1Ch: Not equal \n
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than
+/// (swapped operands) \n
/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
-/// (swapped operands)
+/// (swapped operands) \n
/// 07h, 0Fh, 17h, 1Fh: Ordered
/// \returns A 256-bit vector of [4 x double] containing the comparison results.
#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
@@ -1755,7 +1762,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m256 _mm256_cmp_ps(__m256 a, __m256 b, const int c);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCMPPS / CMPPS instruction.
+/// This intrinsic corresponds to the <c> VCMPPS </c> instruction.
///
/// \param a
/// A 256-bit vector of [8 x float].
@@ -1763,16 +1770,17 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// A 256-bit vector of [8 x float].
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
-/// operation to use:
-/// 00h, 08h, 10h, 18h: Equal
-/// 01h, 09h, 11h, 19h: Less than
-/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
-/// operands)
-/// 03h, 0Bh, 13h, 1Bh: Unordered
-/// 04h, 0Ch, 14h, 1Ch: Not equal
-/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// operation to use: \n
+/// 00h, 08h, 10h, 18h: Equal \n
+/// 01h, 09h, 11h, 19h: Less than \n
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal
+/// (swapped operands) \n
+/// 03h, 0Bh, 13h, 1Bh: Unordered \n
+/// 04h, 0Ch, 14h, 1Ch: Not equal \n
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than
+/// (swapped operands) \n
/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
-/// (swapped operands)
+/// (swapped operands) \n
/// 07h, 0Fh, 17h, 1Fh: Ordered
/// \returns A 256-bit vector of [8 x float] containing the comparison results.
#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
@@ -1790,7 +1798,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m128d _mm_cmp_sd(__m128d a, __m128d b, const int c);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCMPSD / CMPSD instruction.
+/// This intrinsic corresponds to the <c> VCMPSD </c> instruction.
///
/// \param a
/// A 128-bit vector of [2 x double].
@@ -1798,16 +1806,17 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// A 128-bit vector of [2 x double].
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
-/// operation to use:
-/// 00h, 08h, 10h, 18h: Equal
-/// 01h, 09h, 11h, 19h: Less than
-/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
-/// operands)
-/// 03h, 0Bh, 13h, 1Bh: Unordered
-/// 04h, 0Ch, 14h, 1Ch: Not equal
-/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// operation to use: \n
+/// 00h, 08h, 10h, 18h: Equal \n
+/// 01h, 09h, 11h, 19h: Less than \n
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal
+/// (swapped operands) \n
+/// 03h, 0Bh, 13h, 1Bh: Unordered \n
+/// 04h, 0Ch, 14h, 1Ch: Not equal \n
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than
+/// (swapped operands) \n
/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
-/// (swapped operands)
+/// (swapped operands) \n
/// 07h, 0Fh, 17h, 1Fh: Ordered
/// \returns A 128-bit vector of [2 x double] containing the comparison results.
#define _mm_cmp_sd(a, b, c) __extension__ ({ \
@@ -1825,7 +1834,7 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// __m128 _mm_cmp_ss(__m128 a, __m128 b, const int c);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCMPSS / CMPSS instruction.
+/// This intrinsic corresponds to the <c> VCMPSS </c> instruction.
///
/// \param a
/// A 128-bit vector of [4 x float].
@@ -1833,16 +1842,17 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// A 128-bit vector of [4 x float].
/// \param c
/// An immediate integer operand, with bits [4:0] specifying which comparison
-/// operation to use:
-/// 00h, 08h, 10h, 18h: Equal
-/// 01h, 09h, 11h, 19h: Less than
-/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal (swapped
-/// operands)
-/// 03h, 0Bh, 13h, 1Bh: Unordered
-/// 04h, 0Ch, 14h, 1Ch: Not equal
-/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than (swapped operands)
+/// operation to use: \n
+/// 00h, 08h, 10h, 18h: Equal \n
+/// 01h, 09h, 11h, 19h: Less than \n
+/// 02h, 0Ah, 12h, 1Ah: Less than or equal / Greater than or equal
+/// (swapped operands) \n
+/// 03h, 0Bh, 13h, 1Bh: Unordered \n
+/// 04h, 0Ch, 14h, 1Ch: Not equal \n
+/// 05h, 0Dh, 15h, 1Dh: Not less than / Not greater than
+/// (swapped operands) \n
/// 06h, 0Eh, 16h, 1Eh: Not less than or equal / Not greater than or equal
-/// (swapped operands)
+/// (swapped operands) \n
/// 07h, 0Fh, 17h, 1Fh: Ordered
/// \returns A 128-bit vector of [4 x float] containing the comparison results.
#define _mm_cmp_ss(a, b, c) __extension__ ({ \
@@ -1854,8 +1864,8 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
-/// EXTRACTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 256-bit vector of [8 x i32].
@@ -1876,8 +1886,8 @@ _mm256_extract_epi32(__m256i __a, const int __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
-/// EXTRACTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 256-bit integer vector of [16 x i16].
@@ -1898,8 +1908,8 @@ _mm256_extract_epi16(__m256i __a, const int __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
-/// EXTRACTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 256-bit integer vector of [32 x i8].
@@ -1921,8 +1931,8 @@ _mm256_extract_epi8(__m256i __a, const int __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VEXTRACTF128+COMPOSITE /
-/// EXTRACTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 256-bit integer vector of [4 x i64].
@@ -1945,8 +1955,8 @@ _mm256_extract_epi64(__m256i __a, const int __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
-/// INSERTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A vector of [8 x i32] to be used by the insert operation.
@@ -1955,8 +1965,8 @@ _mm256_extract_epi64(__m256i __a, const int __imm)
/// \param __imm
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector __a, after replacing its element indexed by __imm
-/// with __b.
+/// \returns A copy of vector \a __a, after replacing its element indexed by
+/// \a __imm with \a __b.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi32(__m256i __a, int __b, int const __imm)
{
@@ -1972,8 +1982,8 @@ _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
-/// INSERTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A vector of [16 x i16] to be used by the insert operation.
@@ -1982,8 +1992,8 @@ _mm256_insert_epi32(__m256i __a, int __b, int const __imm)
/// \param __imm
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector __a, after replacing its element indexed by __imm
-/// with __b.
+/// \returns A copy of vector \a __a, after replacing its element indexed by
+/// \a __imm with \a __b.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi16(__m256i __a, int __b, int const __imm)
{
@@ -1998,8 +2008,8 @@ _mm256_insert_epi16(__m256i __a, int __b, int const __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
-/// INSERTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A vector of [32 x i8] to be used by the insert operation.
@@ -2008,8 +2018,8 @@ _mm256_insert_epi16(__m256i __a, int __b, int const __imm)
/// \param __imm
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector __a, after replacing its element indexed by __imm
-/// with __b.
+/// \returns A copy of vector \a __a, after replacing its element indexed by
+/// \a __imm with \a __b.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi8(__m256i __a, int __b, int const __imm)
{
@@ -2025,8 +2035,8 @@ _mm256_insert_epi8(__m256i __a, int __b, int const __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VINSERTF128+COMPOSITE /
-/// INSERTF128+COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A vector of [4 x i64] to be used by the insert operation.
@@ -2035,8 +2045,8 @@ _mm256_insert_epi8(__m256i __a, int __b, int const __imm)
/// \param __imm
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector __a, after replacing its element indexed by __imm
-/// with __b.
+/// \returns A copy of vector \a __a, after replacing its element indexed by
+/// \a __imm with \a __b.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
{
@@ -2051,7 +2061,7 @@ _mm256_insert_epi64(__m256i __a, long long __b, int const __imm)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTDQ2PD / CVTDQ2PD instruction.
+/// This intrinsic corresponds to the <c> VCVTDQ2PD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector of [4 x i32].
@@ -2066,7 +2076,7 @@ _mm256_cvtepi32_pd(__m128i __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTDQ2PS / CVTDQ2PS instruction.
+/// This intrinsic corresponds to the <c> VCVTDQ2PS </c> instruction.
///
/// \param __a
/// A 256-bit integer vector.
@@ -2082,7 +2092,7 @@ _mm256_cvtepi32_ps(__m256i __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTPD2PS / CVTPD2PS instruction.
+/// This intrinsic corresponds to the <c> VCVTPD2PS </c> instruction.
///
/// \param __a
/// A 256-bit vector of [4 x double].
@@ -2097,7 +2107,7 @@ _mm256_cvtpd_ps(__m256d __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTPS2DQ / CVTPS2DQ instruction.
+/// This intrinsic corresponds to the <c> VCVTPS2DQ </c> instruction.
///
/// \param __a
/// A 256-bit vector of [8 x float].
@@ -2108,24 +2118,66 @@ _mm256_cvtps_epi32(__m256 __a)
return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) __a);
}
+/// \brief Converts a 128-bit vector of [4 x float] into a 256-bit vector of
+/// [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTPS2PD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 256-bit vector of [4 x double] containing the converted values.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_cvtps_pd(__m128 __a)
{
return (__m256d)__builtin_convertvector((__v4sf)__a, __v4df);
}
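A short sketch of the widening conversion just documented (input values chosen for illustration; an AVX build is assumed):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128 f = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
      __m256d d = _mm256_cvtps_pd(f);    /* each float widens exactly */
      double out[4];
      _mm256_storeu_pd(out, d);
      printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 1 2 3 4 */
      return 0;
    }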
+/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of
+/// [4 x i32], truncating the result by rounding towards zero when it is
+/// inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTTPD2DQ </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \returns A 128-bit integer vector containing the converted values.
static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvttpd_epi32(__m256d __a)
{
return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) __a);
}
+/// \brief Converts a 256-bit vector of [4 x double] into a 128-bit vector of
+/// [4 x i32]. When a conversion is inexact, the value returned is rounded
+/// according to the rounding control bits in the MXCSR register.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTPD2DQ </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \returns A 128-bit integer vector containing the converted values.
static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_cvtpd_epi32(__m256d __a)
{
return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) __a);
}
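The difference between the truncating and the MXCSR-rounded conversion is easiest to see on a value with a fractional part. A sketch assuming an AVX build with the default round-to-nearest MXCSR mode:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d v = _mm256_set1_pd(2.7);
      int out[4];
      _mm_storeu_si128((__m128i *)out, _mm256_cvttpd_epi32(v));
      printf("cvtt: %d\n", out[0]);  /* truncated toward zero: 2 */
      _mm_storeu_si128((__m128i *)out, _mm256_cvtpd_epi32(v));
      printf("cvt:  %d\n", out[0]);  /* rounded to nearest: 3 */
      return 0;
    }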
+/// \brief Converts a vector of [8 x float] into a vector of [8 x i32],
+/// truncating the result by rounding towards zero when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTTPS2DQ </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \returns A 256-bit integer vector containing the converted values.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_cvttps_epi32(__m256 __a)
{
@@ -2152,18 +2204,73 @@ _mm256_cvtss_f32(__m256 __a)
}
/* Vector replicate */
+/// \brief Moves and duplicates high-order (odd-indexed) values from a 256-bit
+/// vector of [8 x float] to float values in a 256-bit vector of
+/// [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVSHDUP </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float]. \n
+/// Bits [255:224] of \a __a are written to bits [255:224] and [223:192] of
+/// the return value. \n
+/// Bits [191:160] of \a __a are written to bits [191:160] and [159:128] of
+/// the return value. \n
+/// Bits [127:96] of \a __a are written to bits [127:96] and [95:64] of the
+/// return value. \n
+/// Bits [63:32] of \a __a are written to bits [63:32] and [31:0] of the
+/// return value.
+/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
+/// values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_movehdup_ps(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 1, 1, 3, 3, 5, 5, 7, 7);
}
+/// \brief Moves and duplicates low-order (even-indexed) values from a 256-bit
+/// vector of [8 x float] to float values in a 256-bit vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVSLDUP </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float]. \n
+/// Bits [223:192] of \a __a are written to bits [255:224] and [223:192] of
+/// the return value. \n
+/// Bits [159:128] of \a __a are written to bits [191:160] and [159:128] of
+/// the return value. \n
+/// Bits [95:64] of \a __a are written to bits [127:96] and [95:64] of the
+/// return value. \n
+/// Bits [31:0] of \a __a are written to bits [63:32] and [31:0] of the
+/// return value.
+/// \returns A 256-bit vector of [8 x float] containing the moved and duplicated
+/// values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_moveldup_ps(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 0, 2, 2, 4, 4, 6, 6);
}
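The two duplication patterns side by side, as a sketch (values and the AVX build assumed): _mm256_movehdup_ps replicates the odd-indexed floats and _mm256_moveldup_ps the even-indexed ones.

    #include <immintrin.h>
    #include <stdio.h>

    static void dump(__m256 v) {
      float o[8];
      _mm256_storeu_ps(o, v);
      for (int i = 0; i < 8; ++i) printf("%g ", o[i]);
      printf("\n");
    }

    int main(void) {
      __m256 v = _mm256_set_ps(8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f);
      dump(_mm256_movehdup_ps(v));  /* 2 2 4 4 6 6 8 8 */
      dump(_mm256_moveldup_ps(v));  /* 1 1 3 3 5 5 7 7 */
      return 0;
    }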
+/// \brief Moves and duplicates double-precision floating-point values from a
+/// 256-bit vector of [4 x double] to double-precision values in a 256-bit
+/// vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDDUP </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double]. \n
+/// Bits [63:0] of \a __a are written to bits [127:64] and [63:0] of the
+/// return value. \n
+/// Bits [191:128] of \a __a are written to bits [255:192] and [191:128] of
+/// the return value.
+/// \returns A 256-bit vector of [4 x double] containing the moved and
+/// duplicated values.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_movedup_pd(__m256d __a)
{
@@ -2171,24 +2278,98 @@ _mm256_movedup_pd(__m256d __a)
}
/* Unpack and Interleave */
+/// \brief Unpacks the odd-indexed vector elements from two 256-bit vectors of
+/// [4 x double] and interleaves them into a 256-bit vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKHPD </c> instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [4 x double]. \n
+/// Bits [127:64] are written to bits [63:0] of the return value. \n
+/// Bits [255:192] are written to bits [191:128] of the return value.
+/// \param __b
+/// A 256-bit floating-point vector of [4 x double]. \n
+/// Bits [127:64] are written to bits [127:64] of the return value. \n
+/// Bits [255:192] are written to bits [255:192] of the return value.
+/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpackhi_pd(__m256d __a, __m256d __b)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 1, 5, 1+2, 5+2);
}
+/// \brief Unpacks the even-indexed vector elements from two 256-bit vectors of
+/// [4 x double] and interleaves them into a 256-bit vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKLPD </c> instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [4 x double]. \n
+/// Bits [63:0] are written to bits [63:0] of the return value. \n
+/// Bits [191:128] are written to bits [191:128] of the return value.
+/// \param __b
+/// A 256-bit floating-point vector of [4 x double]. \n
+/// Bits [63:0] are written to bits [127:64] of the return value. \n
+/// Bits [191:128] are written to bits [255:192] of the return value.
+/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_unpacklo_pd(__m256d __a, __m256d __b)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)__b, 0, 4, 0+2, 4+2);
}
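A sketch of the per-lane interleave performed by the two preceding intrinsics (input values assumed for illustration, AVX build assumed):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256d a = _mm256_set_pd(4.0, 3.0, 2.0, 1.0); /* a = 1 2 3 4 */
      __m256d b = _mm256_set_pd(8.0, 7.0, 6.0, 5.0); /* b = 5 6 7 8 */
      double o[4];
      _mm256_storeu_pd(o, _mm256_unpackhi_pd(a, b));
      printf("hi: %g %g %g %g\n", o[0], o[1], o[2], o[3]); /* 2 6 4 8 */
      _mm256_storeu_pd(o, _mm256_unpacklo_pd(a, b));
      printf("lo: %g %g %g %g\n", o[0], o[1], o[2], o[3]); /* 1 5 3 7 */
      return 0;
    }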
+/// \brief Unpacks the 32-bit vector elements 2, 3, 6 and 7 from each of the
+/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit
+/// vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKHPS </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float]. \n
+/// Bits [95:64] are written to bits [31:0] of the return value. \n
+/// Bits [127:96] are written to bits [95:64] of the return value. \n
+/// Bits [223:192] are written to bits [159:128] of the return value. \n
+/// Bits [255:224] are written to bits [223:192] of the return value.
+/// \param __b
+/// A 256-bit vector of [8 x float]. \n
+/// Bits [95:64] are written to bits [63:32] of the return value. \n
+/// Bits [127:96] are written to bits [127:96] of the return value. \n
+/// Bits [223:192] are written to bits [191:160] of the return value. \n
+/// Bits [255:224] are written to bits [255:224] of the return value.
+/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpackhi_ps(__m256 __a, __m256 __b)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
}
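The same interleave for floats, where elements 2, 3, 6 and 7 of each source are combined; a sketch under the same assumptions:

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m256 a = _mm256_set_ps(8.0f, 7.0f, 6.0f, 5.0f, 4.0f, 3.0f, 2.0f, 1.0f);
      __m256 b = _mm256_set_ps(16.0f, 15.0f, 14.0f, 13.0f,
                               12.0f, 11.0f, 10.0f, 9.0f);
      float o[8];
      _mm256_storeu_ps(o, _mm256_unpackhi_ps(a, b));
      for (int i = 0; i < 8; ++i) printf("%g ", o[i]); /* 3 11 4 12 7 15 8 16 */
      printf("\n");
      return 0;
    }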
+/// \brief Unpacks the 32-bit vector elements 0, 1, 4 and 5 from each of the
+/// two 256-bit vectors of [8 x float] and interleaves them into a 256-bit
+/// vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKLPS </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float]. \n
+/// Bits [31:0] are written to bits [31:0] of the return value. \n
+/// Bits [63:32] are written to bits [95:64] of the return value. \n
+/// Bits [159:128] are written to bits [159:128] of the return value. \n
+/// Bits [191:160] are written to bits [223:192] of the return value.
+/// \param __b
+/// A 256-bit vector of [8 x float]. \n
+/// Bits [31:0] are written to bits [63:32] of the return value. \n
+/// Bits [63:32] are written to bits [127:96] of the return value. \n
+/// Bits [159:128] are written to bits [191:160] of the return value. \n
+/// Bits [191:160] are written to bits [255:224] of the return value.
+/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_unpacklo_ps(__m256 __a, __m256 __b)
{
@@ -2196,90 +2377,401 @@ _mm256_unpacklo_ps(__m256 __a, __m256 __b)
}
/* Bit Test */
+/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an
+/// element-by-element comparison of the double-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the ZF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns the ZF flag in the EFLAGS register.
static __inline int __DEFAULT_FN_ATTRS
_mm_testz_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_vtestzpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an
+/// element-by-element comparison of the double-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the CF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns the CF flag in the EFLAGS register.
static __inline int __DEFAULT_FN_ATTRS
_mm_testc_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_vtestcpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Given two 128-bit floating-point vectors of [2 x double], perform an
+/// element-by-element comparison of the double-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,
+/// otherwise it returns 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_vtestnzcpd((__v2df)__a, (__v2df)__b);
}
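For illustration, a minimal sketch (not part of the diff) of how the three test intrinsics report sign-bit comparisons; the values are arbitrary and the snippet assumes compilation with -mavx.

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128d a = _mm_set_pd(-1.0, 2.0); /* sign bits: element1 = 1, element0 = 0 */
      __m128d b = _mm_set_pd(-3.0, 4.0); /* sign bits: element1 = 1, element0 = 0 */
      /* Element 1 of both vectors is negative, so ZF = 0 and testz returns 0. */
      printf("%d\n", _mm_testz_pd(a, b));
      /* No element pair has sign(a) = 0 and sign(b) = 1, so CF = 1 and testc returns 1. */
      printf("%d\n", _mm_testc_pd(a, b));
      /* Returns 1 only when ZF = 0 and CF = 0; here CF = 1, so it returns 0. */
      printf("%d\n", _mm_testnzc_pd(a, b));
      return 0;
    }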
+/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an
+/// element-by-element comparison of the single-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the ZF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns the ZF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm_testz_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_vtestzps((__v4sf)__a, (__v4sf)__b);
}
+/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an
+/// element-by-element comparison of the single-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the CF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns the CF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm_testc_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_vtestcps((__v4sf)__a, (__v4sf)__b);
}
+/// \brief Given two 128-bit floating-point vectors of [4 x float], perform an
+/// element-by-element comparison of the single-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,
+/// otherwise it returns 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \param __b
+/// A 128-bit vector of [4 x float].
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
static __inline int __DEFAULT_FN_ATTRS
_mm_testnzc_ps(__m128 __a, __m128 __b)
{
return __builtin_ia32_vtestnzcps((__v4sf)__a, (__v4sf)__b);
}
+/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an
+/// element-by-element comparison of the double-precision elements in the
+/// first source vector and the corresponding elements in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the ZF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \param __b
+/// A 256-bit vector of [4 x double].
+/// \returns the ZF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_pd(__m256d __a, __m256d __b)
{
return __builtin_ia32_vtestzpd256((__v4df)__a, (__v4df)__b);
}
+/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an
+/// element-by-element comparison of the double-precision elements in the
+/// first source vector and the corresponding elements in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the CF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \param __b
+/// A 256-bit vector of [4 x double].
+/// \returns the CF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_pd(__m256d __a, __m256d __b)
{
return __builtin_ia32_vtestcpd256((__v4df)__a, (__v4df)__b);
}
+/// \brief Given two 256-bit floating-point vectors of [4 x double], perform an
+/// element-by-element comparison of the double-precision elements in the
+/// first source vector and the corresponding elements in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of double-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,
+/// otherwise it returns 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPD </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double].
+/// \param __b
+/// A 256-bit vector of [4 x double].
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_pd(__m256d __a, __m256d __b)
{
return __builtin_ia32_vtestnzcpd256((__v4df)__a, (__v4df)__b);
}
+/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an
+/// element-by-element comparison of the single-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the ZF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \param __b
+/// A 256-bit vector of [8 x float].
+/// \returns the ZF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_ps(__m256 __a, __m256 __b)
{
return __builtin_ia32_vtestzps256((__v8sf)__a, (__v8sf)__b);
}
+/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an
+/// element-by-element comparison of the single-precision element in the
+/// first source vector and the corresponding element in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the CF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \param __b
+/// A 256-bit vector of [8 x float].
+/// \returns the CF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_ps(__m256 __a, __m256 __b)
{
return __builtin_ia32_vtestcps256((__v8sf)__a, (__v8sf)__b);
}
+/// \brief Given two 256-bit floating-point vectors of [8 x float], perform an
+/// element-by-element comparison of the single-precision elements in the
+/// first source vector and the corresponding elements in the second source
+/// vector. The EFLAGS register is updated as follows: \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bits of both elements are 1, the ZF flag is set to 0. Otherwise the
+/// ZF flag is set to 1. \n
+/// If there is at least one pair of single-precision elements where the
+/// sign-bit of the first element is 0 and the sign-bit of the second element
+/// is 1, the CF flag is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,
+/// otherwise it returns 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VTESTPS </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float].
+/// \param __b
+/// A 256-bit vector of [8 x float].
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_ps(__m256 __a, __m256 __b)
{
return __builtin_ia32_vtestnzcps256((__v8sf)__a, (__v8sf)__b);
}
+/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison
+/// of the two source vectors and update the EFLAGS register as follows: \n
+/// If there is at least one pair of bits where both bits are 1, the ZF flag
+/// is set to 0. Otherwise the ZF flag is set to 1. \n
+/// If there is at least one pair of bits where the bit from the first source
+/// vector is 0 and the bit from the second source vector is 1, the CF flag
+/// is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the ZF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPTEST </c> instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns the ZF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testz_si256(__m256i __a, __m256i __b)
{
return __builtin_ia32_ptestz256((__v4di)__a, (__v4di)__b);
}
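For illustration, the common all-zeros idiom built on this intrinsic (a sketch, not part of the diff): ANDing a vector with itself leaves it unchanged, so ZF is 1 exactly when every bit is zero.

    #include <immintrin.h>

    /* Returns 1 if every bit of v is zero, 0 otherwise. */
    static int is_all_zero(__m256i v) {
      return _mm256_testz_si256(v, v);
    }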
+/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison
+/// of the two source vectors and update the EFLAGS register as follows: \n
+/// If there is at least one pair of bits where both bits are 1, the ZF flag
+/// is set to 0. Otherwise the ZF flag is set to 1. \n
+/// If there is at least one pair of bits where the bit from the first source
+/// vector is 0 and the bit from the second source vector is 1, the CF flag
+/// is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns the value of the CF flag.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPTEST </c> instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns the CF flag.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testc_si256(__m256i __a, __m256i __b)
{
return __builtin_ia32_ptestc256((__v4di)__a, (__v4di)__b);
}
+/// \brief Given two 256-bit integer vectors, perform a bit-by-bit comparison
+/// of the two source vectors and update the EFLAGS register as follows: \n
+/// If there is at least one pair of bits where both bits are 1, the ZF flag
+/// is set to 0. Otherwise the ZF flag is set to 1. \n
+/// If there is at least one pair of bits where the bit from the first source
+/// vector is 0 and the bit from the second source vector is 1, the CF flag
+/// is set to 0. Otherwise the CF flag is set to 1. \n
+/// This intrinsic returns 1 if both the ZF and CF flags are set to 0,
+/// otherwise it returns 0.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPTEST </c> instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \param __b
+/// A 256-bit integer vector.
+/// \returns 1 if both the ZF and CF flags are set to 0, otherwise returns 0.
static __inline int __DEFAULT_FN_ATTRS
_mm256_testnzc_si256(__m256i __a, __m256i __b)
{
@@ -2287,12 +2779,36 @@ _mm256_testnzc_si256(__m256i __a, __m256i __b)
}
/* Vector extract sign mask */
+/// \brief Extracts the sign bits of double-precision floating point elements
+/// in a 256-bit vector of [4 x double] and writes them to the lower order
+/// bits of the return value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVMSKPD </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the double-precision
+/// floating point values with sign bits to be extracted.
+/// \returns The sign bits from the operand, written to bits [3:0].
static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_pd(__m256d __a)
{
return __builtin_ia32_movmskpd256((__v4df)__a);
}
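For illustration, a sketch (not part of the diff) using the 4-bit result mask:

    #include <immintrin.h>

    /* Returns nonzero if any of the four doubles in v has its sign bit set,
       i.e. is negative or negative zero. */
    static int any_negative(__m256d v) {
      return _mm256_movemask_pd(v) != 0; /* mask occupies bits [3:0] */
    }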
+/// \brief Extracts the sign bits of single-precision floating point elements
+/// in a 256-bit vector of [8 x float] and writes them to the lower order
+/// bits of the return value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVMSKPS </c> instruction.
+///
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the single-precision floating
+/// point values with sign bits to be extracted.
+/// \returns The sign bits from the operand, written to bits [7:0].
static __inline int __DEFAULT_FN_ATTRS
_mm256_movemask_ps(__m256 __a)
{
@@ -2300,12 +2816,22 @@ _mm256_movemask_ps(__m256 __a)
}
/* Vector __zero */
+/// \brief Zeroes the contents of all XMM and YMM registers.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VZEROALL </c> instruction.
static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroall(void)
{
__builtin_ia32_vzeroall();
}
+/// \brief Zeroes the upper 128 bits (bits 255:128) of all YMM registers.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VZEROUPPER </c> instruction.
static __inline void __DEFAULT_FN_ATTRS
_mm256_zeroupper(void)
{
@@ -2313,6 +2839,18 @@ _mm256_zeroupper(void)
}
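For illustration, a usage sketch (not part of the diff): issuing VZEROUPPER when leaving 256-bit code is the conventional way to avoid AVX-to-SSE transition penalties; legacy_sse_work is a hypothetical 128-bit-only routine.

    #include <immintrin.h>

    void legacy_sse_work(void); /* hypothetical SSE-only routine */

    void avx_then_sse(__m256 x, float *out) {
      _mm256_storeu_ps(out, x); /* last 256-bit operation */
      _mm256_zeroupper();       /* clear upper YMM state first */
      legacy_sse_work();
    }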
/* Vector load with broadcast */
+/// \brief Loads a scalar single-precision floating point value from the
+/// memory location pointed to by \a __a and broadcasts it to the elements
+/// of a [4 x float] vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VBROADCASTSS </c> instruction.
+///
+/// \param __a
+/// A pointer to a single-precision floating point value to be broadcast.
+/// \returns A 128-bit vector of [4 x float] whose 32-bit elements are set
+/// equal to the broadcast value.
static __inline __m128 __DEFAULT_FN_ATTRS
_mm_broadcast_ss(float const *__a)
{
@@ -2320,6 +2858,18 @@ _mm_broadcast_ss(float const *__a)
return (__m128)(__v4sf){ __f, __f, __f, __f };
}
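For illustration, a sketch (not part of the diff): broadcasting one scalar to all four lanes, equivalent to constructing { *p, *p, *p, *p } as the body above shows.

    #include <immintrin.h>

    static __m128 splat4(const float *p) {
      return _mm_broadcast_ss(p); /* every 32-bit lane becomes *p */
    }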
+/// \brief Loads a scalar double-precision floating point value from the
+/// memory location pointed to by \a __a and broadcasts it to the elements
+/// of a [4 x double] vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VBROADCASTSD </c> instruction.
+///
+/// \param __a
+/// A pointer to a double-precision floating point value to be broadcast.
+/// \returns A 256-bit vector of [4 x double] whose 64-bit elements are set
+/// equal to the broadcast value.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_sd(double const *__a)
{
@@ -2327,6 +2877,18 @@ _mm256_broadcast_sd(double const *__a)
return (__m256d)(__v4df){ __d, __d, __d, __d };
}
+/// \brief Loads a scalar single-precision floating point value from the
+/// memory location pointed to by \a __a and broadcasts it to the elements
+/// of an [8 x float] vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VBROADCASTSS </c> instruction.
+///
+/// \param __a
+/// A pointer to a single-precision floating point value to be broadcast.
+/// \returns A 256-bit vector of [8 x float] whose 32-bit elements are set
+/// equal to the broadcast value.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ss(float const *__a)
{
@@ -2334,12 +2896,36 @@ _mm256_broadcast_ss(float const *__a)
return (__m256)(__v8sf){ __f, __f, __f, __f, __f, __f, __f, __f };
}
+/// \brief Loads a 128-bit vector of [2 x double] from the memory location
+/// pointed to by \a __a and broadcasts it to the 128-bit elements of a
+/// 256-bit vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VBROADCASTF128 </c> instruction.
+///
+/// \param __a
+/// A pointer to a 128-bit vector of [2 x double] to be broadcast.
+/// \returns A 256-bit vector of [4 x double] whose 128-bit elements are set
+/// equal to the broadcast value.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_broadcast_pd(__m128d const *__a)
{
return (__m256d)__builtin_ia32_vbroadcastf128_pd256((__v2df const *)__a);
}
+/// \brief Loads a 128-bit vector of [4 x float] from the memory location
+/// pointed to by \a __a and broadcasts it to the 128-bit elements of a
+/// 256-bit vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VBROADCASTF128 </c> instruction.
+///
+/// \param __a
+/// A pointer to a 128-bit vector of [4 x float] to be broadcast.
+/// \returns A 256-bit vector of [8 x float] whose 128-bit elements are set
+/// equal to the broadcast value.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_broadcast_ps(__m128 const *__a)
{
@@ -2347,18 +2933,50 @@ _mm256_broadcast_ps(__m128 const *__a)
}
/* SIMD load ops */
+/// \brief Loads 4 double-precision floating point values from a 32-byte aligned
+/// memory location pointed to by \a __p into a vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPD </c> instruction.
+///
+/// \param __p
+/// A 32-byte aligned pointer to a memory location containing
+/// double-precision floating point values.
+/// \returns A 256-bit vector of [4 x double] containing the moved values.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_load_pd(double const *__p)
{
return *(__m256d *)__p;
}
+/// \brief Loads 8 single-precision floating point values from a 32-byte aligned
+/// memory location pointed to by \a __p into a vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPS </c> instruction.
+///
+/// \param __p
+/// A 32-byte aligned pointer to a memory location containing float values.
+/// \returns A 256-bit vector of [8 x float] containing the moved values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_load_ps(float const *__p)
{
return *(__m256 *)__p;
}
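For illustration, a sketch (not part of the diff) of the alignment contract, assuming C11 _Alignas: the aligned loads require a 32-byte aligned pointer, while the unaligned variants below do not.

    #include <immintrin.h>

    _Alignas(32) static float buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};

    static __m256 load_example(void) {
      return _mm256_load_ps(buf); /* OK: buf is 32-byte aligned */
    }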
+/// \brief Loads 4 double-precision floating point values from an unaligned
+/// memory location pointed to by \a __p into a vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVUPD </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location containing double-precision floating
+/// point values.
+/// \returns A 256-bit vector of [4 x double] containing the moved values.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu_pd(double const *__p)
{
@@ -2368,6 +2986,17 @@ _mm256_loadu_pd(double const *__p)
return ((struct __loadu_pd*)__p)->__v;
}
+/// \brief Loads 8 single-precision floating point values from an unaligned
+/// memory location pointed to by \a __p into a vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVUPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location containing single-precision floating
+/// point values.
+/// \returns A 256-bit vector of [8 x float] containing the moved values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu_ps(float const *__p)
{
@@ -2377,12 +3006,33 @@ _mm256_loadu_ps(float const *__p)
return ((struct __loadu_ps*)__p)->__v;
}
+/// \brief Loads 256 bits of integer data from a 32-byte aligned memory
+/// location pointed to by \a __p into elements of a 256-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDQA </c> instruction.
+///
+/// \param __p
+/// A 32-byte aligned pointer to a 256-bit integer vector containing integer
+/// values.
+/// \returns A 256-bit integer vector containing the moved values.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_load_si256(__m256i const *__p)
{
return *__p;
}
+/// \brief Loads 256 bits of integer data from an unaligned memory location
+/// pointed to by \a __p into a 256-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDQU </c> instruction.
+///
+/// \param __p
+/// A pointer to a 256-bit integer vector containing integer values.
+/// \returns A 256-bit integer vector containing the moved values.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu_si256(__m256i const *__p)
{
@@ -2392,6 +3042,18 @@ _mm256_loadu_si256(__m256i const *__p)
return ((struct __loadu_si256*)__p)->__v;
}
+/// \brief Loads 256 bits of integer data from an unaligned memory location
+/// pointed to by \a __p into a 256-bit integer vector. This intrinsic may
+/// perform better than \c _mm256_loadu_si256 when the data crosses a cache
+/// line boundary.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VLDDQU </c> instruction.
+///
+/// \param __p
+/// A pointer to a 256-bit integer vector containing integer values.
+/// \returns A 256-bit integer vector containing the moved values.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_lddqu_si256(__m256i const *__p)
{
@@ -2399,18 +3061,55 @@ _mm256_lddqu_si256(__m256i const *__p)
}
/* SIMD store ops */
+/// \brief Stores double-precision floating point values from a 256-bit vector
+/// of [4 x double] to a 32-byte aligned memory location pointed to by
+/// \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPD </c> instruction.
+///
+/// \param __p
+/// A 32-byte aligned pointer to a memory location that will receive the
+/// double-precision floating point values.
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_store_pd(double *__p, __m256d __a)
{
*(__m256d *)__p = __a;
}
+/// \brief Stores single-precision floating point values from a 256-bit vector
+/// of [8 x float] to a 32-byte aligned memory location pointed to by \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPS </c> instruction.
+///
+/// \param __p
+/// A 32-byte aligned pointer to a memory location that will receive the
+/// float values.
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_store_ps(float *__p, __m256 __a)
{
*(__m256 *)__p = __a;
}
+/// \brief Stores double-precision floating point values from a 256-bit vector
+/// of [4 x double] to an unaligned memory location pointed to by \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVUPD </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the double-precision
+/// floating point values.
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_pd(double *__p, __m256d __a)
{
@@ -2420,6 +3119,17 @@ _mm256_storeu_pd(double *__p, __m256d __a)
((struct __storeu_pd*)__p)->__v = __a;
}
+/// \brief Stores single-precision floating point values from a 256-bit vector
+/// of [8 x float] to an unaligned memory location pointed to by \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVUPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the float values.
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_ps(float *__p, __m256 __a)
{
@@ -2429,12 +3139,35 @@ _mm256_storeu_ps(float *__p, __m256 __a)
((struct __storeu_ps*)__p)->__v = __a;
}
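For illustration, a sketch (not part of the diff): the unaligned store places a computed vector at any address; the aligned _mm256_store_ps above would require a 32-byte aligned destination.

    #include <immintrin.h>

    /* Writes 8 copies of value to dst, which may have any alignment. */
    static void fill8(float *dst, float value) {
      _mm256_storeu_ps(dst, _mm256_set1_ps(value));
    }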
+/// \brief Stores integer values from a 256-bit integer vector to a 32-byte
+/// aligned memory location pointed to by \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDQA </c> instruction.
+///
+/// \param __p
+/// A 32-byte aligned pointer to a memory location that will receive the
+/// integer values.
+/// \param __a
+/// A 256-bit integer vector containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_store_si256(__m256i *__p, __m256i __a)
{
*__p = __a;
}
+/// \brief Stores integer values from a 256-bit integer vector to an unaligned
+/// memory location pointed to by \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDQU </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the integer values.
+/// \param __a
+/// A 256-bit integer vector containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu_si256(__m256i *__p, __m256i __a)
{
@@ -2445,12 +3178,48 @@ _mm256_storeu_si256(__m256i *__p, __m256i __a)
}
/* Conditional load ops */
+/// \brief Conditionally loads double-precision floating point elements from a
+/// memory location pointed to by \a __p into a 128-bit vector of
+/// [2 x double], depending on the mask bits associated with each data
+/// element.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that contains the double-precision
+/// floating point values.
+/// \param __m
+/// A 128-bit integer vector containing the mask. The most significant bit of
+/// each data element represents the mask bits. If a mask bit is zero, the
+/// corresponding value in the memory location is not loaded and the
+/// corresponding field in the return value is set to zero.
+/// \returns A 128-bit vector of [2 x double] containing the loaded values.
static __inline __m128d __DEFAULT_FN_ATTRS
_mm_maskload_pd(double const *__p, __m128i __m)
{
return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)__p, (__v2di)__m);
}
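For illustration, a sketch (not part of the diff) of building the mask: only elements whose mask MSB is set are loaded, and the remaining lanes of the result are zeroed.

    #include <immintrin.h>

    /* Loads only the low double from p; the high lane of the result is 0.0. */
    static __m128d load_low_only(const double *p) {
      __m128i mask = _mm_set_epi64x(0, -1); /* MSBs: element1 = 0, element0 = 1 */
      return _mm_maskload_pd(p, mask);
    }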
+/// \brief Conditionally loads double-precision floating point elements from a
+/// memory location pointed to by \a __p into a 256-bit vector of
+/// [4 x double], depending on the mask bits associated with each data
+/// element.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that contains the double-precision
+/// floating point values.
+/// \param __m
+/// A 256-bit integer vector of [4 x quadword] containing the mask. The most
+/// significant bit of each quadword element represents the mask bits. If a
+/// mask bit is zero, the corresponding value in the memory location is not
+/// loaded and the corresponding field in the return value is set to zero.
+/// \returns A 256-bit vector of [4 x double] containing the loaded values.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_maskload_pd(double const *__p, __m256i __m)
{
@@ -2458,12 +3227,48 @@ _mm256_maskload_pd(double const *__p, __m256i __m)
(__v4di)__m);
}
+/// \brief Conditionally loads single-precision floating point elements from a
+/// memory location pointed to by \a __p into a 128-bit vector of
+/// [4 x float], depending on the mask bits associated with each data
+/// element.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that contains the single-precision
+/// floating point values.
+/// \param __m
+/// A 128-bit integer vector containing the mask. The most significant bit of
+/// each data element represents the mask bits. If a mask bit is zero, the
+/// corresponding value in the memory location is not loaded and the
+/// corresponding field in the return value is set to zero.
+/// \returns A 128-bit vector of [4 x float] containing the loaded values.
static __inline __m128 __DEFAULT_FN_ATTRS
_mm_maskload_ps(float const *__p, __m128i __m)
{
return (__m128)__builtin_ia32_maskloadps((const __v4sf *)__p, (__v4si)__m);
}
+/// \brief Conditionally loads single-precision floating point elements from a
+/// memory location pointed to by \a __p into a 256-bit vector of
+/// [8 x float], depending on the mask bits associated with each data
+/// element.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that contains the single-precision
+/// floating point values.
+/// \param __m
+/// A 256-bit integer vector of [8 x dword] containing the mask. The most
+/// significant bit of each dword element represents the mask bits. If a mask
+/// bit is zero, the corresponding value in the memory location is not loaded
+/// and the corresponding field in the return value is set to zero.
+/// \returns A 256-bit vector of [8 x float] containing the loaded values.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_maskload_ps(float const *__p, __m256i __m)
{
@@ -2471,24 +3276,96 @@ _mm256_maskload_ps(float const *__p, __m256i __m)
}
/* Conditional store ops */
+/// \brief Moves single-precision floating point values from a 256-bit vector
+/// of [8 x float] to a memory location pointed to by \a __p, according to
+/// the specified mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the float values.
+/// \param __m
+/// A 256-bit integer vector of [8 x dword] containing the mask. The most
+/// significant bit of each dword element in the mask vector represents the
+/// mask bits. If a mask bit is zero, the corresponding value from vector
+/// \a __a is not stored and the corresponding field in the memory location
+/// pointed to by \a __p is not changed.
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the values to be stored.
static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_ps(float *__p, __m256i __m, __m256 __a)
{
__builtin_ia32_maskstoreps256((__v8sf *)__p, (__v8si)__m, (__v8sf)__a);
}
+/// \brief Moves double-precision values from a 128-bit vector of [2 x double]
+/// to a memory location pointed to by \a __p, according to the specified
+/// mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the double-precision
+/// floating point values.
+/// \param __m
+/// A 128-bit integer vector containing the mask. The most significant bit of
+/// each field in the mask vector represents the mask bits. If a mask bit is
+/// zero, the corresponding value from vector \a __a is not stored and the
+/// corresponding field in the memory location pointed to by \a __p is not
+/// changed.
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the values to be stored.
static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_pd(double *__p, __m128i __m, __m128d __a)
{
__builtin_ia32_maskstorepd((__v2df *)__p, (__v2di)__m, (__v2df)__a);
}
+/// \brief Moves double-precision values from a 256-bit vector of [4 x double]
+/// to a memory location pointed to by \a __p, according to the specified
+/// mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPD </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the double-precision
+/// floating point values.
+/// \param __m
+/// A 256-bit integer vector of [4 x quadword] containing the mask. The most
+/// significant bit of each quadword element in the mask vector represents
+/// the mask bits. If a mask bit is zero, the corresponding value from vector
+/// __a is not stored and the corresponding field in the memory location
+/// pointed to by \a __p is not changed.
+/// \param __a
+/// A 256-bit vector of [4 x double] containing the values to be stored.
static __inline void __DEFAULT_FN_ATTRS
_mm256_maskstore_pd(double *__p, __m256i __m, __m256d __a)
{
__builtin_ia32_maskstorepd256((__v4df *)__p, (__v4di)__m, (__v4df)__a);
}
+/// \brief Moves single-precision floating point values from a 128-bit vector
+/// of [4 x float] to a memory location pointed to by \a __p, according to
+/// the specified mask.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the float values.
+/// \param __m
+/// A 128-bit integer vector containing the mask. The most significant bit of
+/// each field in the mask vector represents the mask bits. If a mask bit is
+/// zero, the corresponding value from vector \a __a is not stored and the
+/// corresponding field in the memory location pointed to by \a __p is not
+/// changed.
+/// \param __a
+/// A 128-bit vector of [4 x float] containing the values to be stored.
static __inline void __DEFAULT_FN_ATTRS
_mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
{
@@ -2496,18 +3373,58 @@ _mm_maskstore_ps(float *__p, __m128i __m, __m128 __a)
}
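For illustration, a companion sketch (not part of the diff): storing only selected lanes leaves the other destination elements untouched.

    #include <immintrin.h>

    /* Stores elements 0 and 2 of v; dst[1] and dst[3] are left unchanged. */
    static void store_even_lanes(float *dst, __m128 v) {
      __m128i mask = _mm_set_epi32(0, -1, 0, -1); /* MSBs: 0, 1, 0, 1 */
      _mm_maskstore_ps(dst, mask, v);
    }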
/* Cacheability support ops */
+/// \brief Moves integer data from a 256-bit integer vector to a 32-byte
+/// aligned memory location. To minimize caching, the data is flagged as
+/// non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVNTDQ </c> instruction.
+///
+/// \param __a
+/// A pointer to a 32-byte aligned memory location that will receive the
+/// integer values.
+/// \param __b
+/// A 256-bit integer vector containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_si256(__m256i *__a, __m256i __b)
{
__builtin_nontemporal_store((__v4di)__b, (__v4di*)__a);
}
+/// \brief Moves double-precision values from a 256-bit vector of [4 x double]
+/// to a 32-byte aligned memory location. To minimize caching, the data is
+/// flagged as non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVNTPD </c> instruction.
+///
+/// \param __a
+/// A pointer to a 32-byte aligned memory location that will receive the
+/// double-precision floating point values.
+/// \param __b
+/// A 256-bit vector of [4 x double] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_pd(double *__a, __m256d __b)
{
__builtin_nontemporal_store((__v4df)__b, (__v4df*)__a);
}
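For illustration, a usage sketch (not part of the diff): non-temporal stores are conventionally followed by a store fence before another agent reads the data; dst is assumed to be 32-byte aligned.

    #include <immintrin.h>

    static void stream_out(double *dst, __m256d v) {
      _mm256_stream_pd(dst, v); /* bypasses the cache hierarchy */
      _mm_sfence();             /* orders the NT store with later stores */
    }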
+/// \brief Moves single-precision floating point values from a 256-bit vector
+/// of [8 x float] to a 32-byte aligned memory location. To minimize
+/// caching, the data is flagged as non-temporal (unlikely to be used again
+/// soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVNTPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a 32-byte aligned memory location that will receive the
+/// single-precision floating point values.
+/// \param __a
+/// A 256-bit vector of [8 x float] containing the values to be moved.
static __inline void __DEFAULT_FN_ATTRS
_mm256_stream_ps(float *__p, __m256 __a)
{
@@ -2515,30 +3432,105 @@ _mm256_stream_ps(float *__p, __m256 __a)
}
/* Create vectors */
+/// \brief Creates a 256-bit vector of [4 x double] with undefined values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 256-bit vector of [4 x double] containing undefined values.
static __inline__ __m256d __DEFAULT_FN_ATTRS
_mm256_undefined_pd(void)
{
return (__m256d)__builtin_ia32_undef256();
}
+/// \brief Creates a 256-bit vector of [8 x float] with undefined values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 256-bit vector of [8 x float] containing undefined values.
static __inline__ __m256 __DEFAULT_FN_ATTRS
_mm256_undefined_ps(void)
{
return (__m256)__builtin_ia32_undef256();
}
+/// \brief Creates a 256-bit integer vector with undefined values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 256-bit integer vector containing undefined values.
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_undefined_si256(void)
{
return (__m256i)__builtin_ia32_undef256();
}
+/// \brief Constructs a 256-bit floating-point vector of [4 x double]
+/// initialized with the specified double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKLPD+VINSERTF128 </c>
+/// instruction.
+///
+/// \param __a
+/// A double-precision floating-point value used to initialize bits [255:192]
+/// of the result.
+/// \param __b
+/// A double-precision floating-point value used to initialize bits [191:128]
+/// of the result.
+/// \param __c
+/// A double-precision floating-point value used to initialize bits [127:64]
+/// of the result.
+/// \param __d
+/// A double-precision floating-point value used to initialize bits [63:0]
+/// of the result.
+/// \returns An initialized 256-bit floating-point vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set_pd(double __a, double __b, double __c, double __d)
{
return (__m256d){ __d, __c, __b, __a };
}
+/// \brief Constructs a 256-bit floating-point vector of [8 x float] initialized
+/// with the specified single-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __a
+/// A single-precision floating-point value used to initialize bits [255:224]
+/// of the result.
+/// \param __b
+/// A single-precision floating-point value used to initialize bits [223:192]
+/// of the result.
+/// \param __c
+/// A single-precision floating-point value used to initialize bits [191:160]
+/// of the result.
+/// \param __d
+/// A single-precision floating-point value used to initialize bits [159:128]
+/// of the result.
+/// \param __e
+/// A single-precision floating-point value used to initialize bits [127:96]
+/// of the result.
+/// \param __f
+/// A single-precision floating-point value used to initialize bits [95:64]
+/// of the result.
+/// \param __g
+/// A single-precision floating-point value used to initialize bits [63:32]
+/// of the result.
+/// \param __h
+/// A single-precision floating-point value used to initialize bits [31:0]
+/// of the result.
+/// \returns An initialized 256-bit floating-point vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set_ps(float __a, float __b, float __c, float __d,
float __e, float __f, float __g, float __h)
@@ -2546,6 +3538,31 @@ _mm256_set_ps(float __a, float __b, float __c, float __d,
return (__m256){ __h, __g, __f, __e, __d, __c, __b, __a };
}
+/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// 32-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __i0
+/// A 32-bit integral value used to initialize bits [255:224] of the result.
+/// \param __i1
+/// A 32-bit integral value used to initialize bits [223:192] of the result.
+/// \param __i2
+/// A 32-bit integral value used to initialize bits [191:160] of the result.
+/// \param __i3
+/// A 32-bit integral value used to initialize bits [159:128] of the result.
+/// \param __i4
+/// A 32-bit integral value used to initialize bits [127:96] of the result.
+/// \param __i5
+/// A 32-bit integral value used to initialize bits [95:64] of the result.
+/// \param __i6
+/// A 32-bit integral value used to initialize bits [63:32] of the result.
+/// \param __i7
+/// A 32-bit integral value used to initialize bits [31:0] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
int __i4, int __i5, int __i6, int __i7)
@@ -2553,6 +3570,47 @@ _mm256_set_epi32(int __i0, int __i1, int __i2, int __i3,
return (__m256i)(__v8si){ __i7, __i6, __i5, __i4, __i3, __i2, __i1, __i0 };
}
+/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// 16-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __w15
+/// A 16-bit integral value used to initialize bits [255:240] of the result.
+/// \param __w14
+/// A 16-bit integral value used to initialize bits [239:224] of the result.
+/// \param __w13
+/// A 16-bit integral value used to initialize bits [223:208] of the result.
+/// \param __w12
+/// A 16-bit integral value used to initialize bits [207:192] of the result.
+/// \param __w11
+/// A 16-bit integral value used to initialize bits [191:176] of the result.
+/// \param __w10
+/// A 16-bit integral value used to initialize bits [175:160] of the result.
+/// \param __w09
+/// A 16-bit integral value used to initialize bits [159:144] of the result.
+/// \param __w08
+/// A 16-bit integral value used to initialize bits [143:128] of the result.
+/// \param __w07
+/// A 16-bit integral value used to initialize bits [127:112] of the result.
+/// \param __w06
+/// A 16-bit integral value used to initialize bits [111:96] of the result.
+/// \param __w05
+/// A 16-bit integral value used to initialize bits [95:80] of the result.
+/// \param __w04
+/// A 16-bit integral value used to initialize bits [79:64] of the result.
+/// \param __w03
+/// A 16-bit integral value used to initialize bits [63:48] of the result.
+/// \param __w02
+/// A 16-bit integral value used to initialize bits [47:32] of the result.
+/// \param __w01
+/// A 16-bit integral value used to initialize bits [31:16] of the result.
+/// \param __w00
+/// A 16-bit integral value used to initialize bits [15:0] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
short __w11, short __w10, short __w09, short __w08,
@@ -2563,6 +3621,79 @@ _mm256_set_epi16(short __w15, short __w14, short __w13, short __w12,
__w07, __w08, __w09, __w10, __w11, __w12, __w13, __w14, __w15 };
}
+/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// 8-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __b31
+/// An 8-bit integral value used to initialize bits [255:248] of the result.
+/// \param __b30
+/// An 8-bit integral value used to initialize bits [247:240] of the result.
+/// \param __b29
+/// An 8-bit integral value used to initialize bits [239:232] of the result.
+/// \param __b28
+/// An 8-bit integral value used to initialize bits [231:224] of the result.
+/// \param __b27
+/// An 8-bit integral value used to initialize bits [223:216] of the result.
+/// \param __b26
+/// An 8-bit integral value used to initialize bits [215:208] of the result.
+/// \param __b25
+/// An 8-bit integral value used to initialize bits [207:200] of the result.
+/// \param __b24
+/// An 8-bit integral value used to initialize bits [199:192] of the result.
+/// \param __b23
+/// An 8-bit integral value used to initialize bits [191:184] of the result.
+/// \param __b22
+/// An 8-bit integral value used to initialize bits [183:176] of the result.
+/// \param __b21
+/// An 8-bit integral value used to initialize bits [175:168] of the result.
+/// \param __b20
+/// An 8-bit integral value used to initialize bits [167:160] of the result.
+/// \param __b19
+/// An 8-bit integral value used to initialize bits [159:152] of the result.
+/// \param __b18
+/// An 8-bit integral value used to initialize bits [151:144] of the result.
+/// \param __b17
+/// An 8-bit integral value used to initialize bits [143:136] of the result.
+/// \param __b16
+/// An 8-bit integral value used to initialize bits [135:128] of the result.
+/// \param __b15
+/// An 8-bit integral value used to initialize bits [127:120] of the result.
+/// \param __b14
+/// An 8-bit integral value used to initialize bits [119:112] of the result.
+/// \param __b13
+/// An 8-bit integral value used to initialize bits [111:104] of the result.
+/// \param __b12
+/// An 8-bit integral value used to initialize bits [103:96] of the result.
+/// \param __b11
+/// An 8-bit integral value used to initialize bits [95:88] of the result.
+/// \param __b10
+/// An 8-bit integral value used to initialize bits [87:80] of the result.
+/// \param __b09
+/// An 8-bit integral value used to initialize bits [79:72] of the result.
+/// \param __b08
+/// An 8-bit integral value used to initialize bits [71:64] of the result.
+/// \param __b07
+/// An 8-bit integral value used to initialize bits [63:56] of the result.
+/// \param __b06
+/// An 8-bit integral value used to initialize bits [55:48] of the result.
+/// \param __b05
+/// An 8-bit integral value used to initialize bits [47:40] of the result.
+/// \param __b04
+/// An 8-bit integral value used to initialize bits [39:32] of the result.
+/// \param __b03
+/// An 8-bit integral value used to initialize bits [31:24] of the result.
+/// \param __b02
+/// An 8-bit integral value used to initialize bits [23:16] of the result.
+/// \param __b01
+/// An 8-bit integral value used to initialize bits [15:8] of the result.
+/// \param __b00
+/// An 8-bit integral value used to initialize bits [7:0] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
char __b27, char __b26, char __b25, char __b24,
@@ -2581,6 +3712,23 @@ _mm256_set_epi8(char __b31, char __b30, char __b29, char __b28,
};
}
+/// \brief Constructs a 256-bit integer vector initialized with the specified
+/// 64-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKLQDQ+VINSERTF128 </c>
+/// instruction.
+///
+/// \param __a
+/// A 64-bit integral value used to initialize bits [255:192] of the result.
+/// \param __b
+/// A 64-bit integral value used to initialize bits [191:128] of the result.
+/// \param __c
+/// A 64-bit integral value used to initialize bits [127:64] of the result.
+/// \param __d
+/// A 64-bit integral value used to initialize bits [63:0] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
{
@@ -2588,12 +3736,68 @@ _mm256_set_epi64x(long long __a, long long __b, long long __c, long long __d)
}
/* Create vectors with elements in reverse order */
+/// \brief Constructs a 256-bit floating-point vector of [4 x double],
+/// initialized in reverse order with the specified double-precision
+/// floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKLPD+VINSERTF128 </c>
+/// instruction.
+///
+/// \param __a
+/// A double-precision floating-point value used to initialize bits [63:0]
+/// of the result.
+/// \param __b
+/// A double-precision floating-point value used to initialize bits [127:64]
+/// of the result.
+/// \param __c
+/// A double-precision floating-point value used to initialize bits [191:128]
+/// of the result.
+/// \param __d
+/// A double-precision floating-point value used to initialize bits [255:192]
+/// of the result.
+/// \returns An initialized 256-bit floating-point vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setr_pd(double __a, double __b, double __c, double __d)
{
return (__m256d){ __a, __b, __c, __d };
}
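For illustration, a sketch (not part of the diff) of the argument order: _mm256_set_pd takes elements high-to-low while _mm256_setr_pd takes them low-to-high, so these two calls produce identical vectors.

    #include <immintrin.h>

    static void same_layout(void) {
      __m256d a = _mm256_set_pd(3.0, 2.0, 1.0, 0.0);  /* bits [63:0] = 0.0 */
      __m256d b = _mm256_setr_pd(0.0, 1.0, 2.0, 3.0); /* bits [63:0] = 0.0 */
      (void)a; (void)b; /* both hold { 0.0, 1.0, 2.0, 3.0 } low-to-high */
    }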
+/// \brief Constructs a 256-bit floating-point vector of [8 x float],
+/// initialized in reverse order with the specified single-precision
+/// floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __a
+/// A single-precision floating-point value used to initialize bits [31:0]
+/// of the result.
+/// \param __b
+/// A single-precision floating-point value used to initialize bits [63:32]
+/// of the result.
+/// \param __c
+/// A single-precision floating-point value used to initialize bits [95:64]
+/// of the result.
+/// \param __d
+/// A single-precision floating-point value used to initialize bits [127:96]
+/// of the result.
+/// \param __e
+/// A single-precision floating-point value used to initialize bits [159:128]
+/// of the result.
+/// \param __f
+/// A single-precision floating-point value used to initialize bits [191:160]
+/// of the result.
+/// \param __g
+/// A single-precision floating-point value used to initialize bits [223:192]
+/// of the result.
+/// \param __h
+/// A single-precision floating-point value used to initialize bits [255:224]
+/// of the result.
+/// \returns An initialized 256-bit floating-point vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setr_ps(float __a, float __b, float __c, float __d,
float __e, float __f, float __g, float __h)
@@ -2601,6 +3805,31 @@ _mm256_setr_ps(float __a, float __b, float __c, float __d,
return (__m256){ __a, __b, __c, __d, __e, __f, __g, __h };
}
+/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// with the specified 32-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __i0
+/// A 32-bit integral value used to initialize bits [31:0] of the result.
+/// \param __i1
+/// A 32-bit integral value used to initialize bits [63:32] of the result.
+/// \param __i2
+/// A 32-bit integral value used to initialize bits [95:64] of the result.
+/// \param __i3
+/// A 32-bit integral value used to initialize bits [127:96] of the result.
+/// \param __i4
+/// A 32-bit integral value used to initialize bits [159:128] of the result.
+/// \param __i5
+/// A 32-bit integral value used to initialize bits [191:160] of the result.
+/// \param __i6
+/// A 32-bit integral value used to initialize bits [223:192] of the result.
+/// \param __i7
+/// A 32-bit integral value used to initialize bits [255:224] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
int __i4, int __i5, int __i6, int __i7)
@@ -2608,6 +3837,47 @@ _mm256_setr_epi32(int __i0, int __i1, int __i2, int __i3,
return (__m256i)(__v8si){ __i0, __i1, __i2, __i3, __i4, __i5, __i6, __i7 };
}
+/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// with the specified 16-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __w15
+/// A 16-bit integral value used to initialize bits [15:0] of the result.
+/// \param __w14
+/// A 16-bit integral value used to initialize bits [31:16] of the result.
+/// \param __w13
+/// A 16-bit integral value used to initialize bits [47:32] of the result.
+/// \param __w12
+/// A 16-bit integral value used to initialize bits [63:48] of the result.
+/// \param __w11
+/// A 16-bit integral value used to initialize bits [79:64] of the result.
+/// \param __w10
+/// A 16-bit integral value used to initialize bits [95:80] of the result.
+/// \param __w09
+/// A 16-bit integral value used to initialize bits [111:96] of the result.
+/// \param __w08
+/// A 16-bit integral value used to initialize bits [127:112] of the result.
+/// \param __w07
+/// A 16-bit integral value used to initialize bits [143:128] of the result.
+/// \param __w06
+/// A 16-bit integral value used to initialize bits [159:144] of the result.
+/// \param __w05
+/// A 16-bit integral value used to initialize bits [175:160] of the result.
+/// \param __w04
+/// A 16-bit integral value used to initialize bits [191:176] of the result.
+/// \param __w03
+/// A 16-bit integral value used to initialize bits [207:192] of the result.
+/// \param __w02
+/// A 16-bit integral value used to initialize bits [223:208] of the result.
+/// \param __w01
+/// A 16-bit integral value used to initialize bits [239:224] of the result.
+/// \param __w00
+/// A 16-bit integral value used to initialize bits [255:240] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
short __w11, short __w10, short __w09, short __w08,
@@ -2618,6 +3888,79 @@ _mm256_setr_epi16(short __w15, short __w14, short __w13, short __w12,
__w08, __w07, __w06, __w05, __w04, __w03, __w02, __w01, __w00 };
}
+/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// with the specified 8-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __b31
+/// An 8-bit integral value used to initialize bits [7:0] of the result.
+/// \param __b30
+/// An 8-bit integral value used to initialize bits [15:8] of the result.
+/// \param __b29
+/// An 8-bit integral value used to initialize bits [23:16] of the result.
+/// \param __b28
+/// An 8-bit integral value used to initialize bits [31:24] of the result.
+/// \param __b27
+/// An 8-bit integral value used to initialize bits [39:32] of the result.
+/// \param __b26
+/// An 8-bit integral value used to initialize bits [47:40] of the result.
+/// \param __b25
+/// An 8-bit integral value used to initialize bits [55:48] of the result.
+/// \param __b24
+/// An 8-bit integral value used to initialize bits [63:56] of the result.
+/// \param __b23
+/// An 8-bit integral value used to initialize bits [71:64] of the result.
+/// \param __b22
+/// An 8-bit integral value used to initialize bits [79:72] of the result.
+/// \param __b21
+/// An 8-bit integral value used to initialize bits [87:80] of the result.
+/// \param __b20
+/// An 8-bit integral value used to initialize bits [95:88] of the result.
+/// \param __b19
+/// An 8-bit integral value used to initialize bits [103:96] of the result.
+/// \param __b18
+/// An 8-bit integral value used to initialize bits [111:104] of the result.
+/// \param __b17
+/// An 8-bit integral value used to initialize bits [119:112] of the result.
+/// \param __b16
+/// An 8-bit integral value used to initialize bits [127:120] of the result.
+/// \param __b15
+/// An 8-bit integral value used to initialize bits [135:128] of the result.
+/// \param __b14
+/// An 8-bit integral value used to initialize bits [143:136] of the result.
+/// \param __b13
+/// An 8-bit integral value used to initialize bits [151:144] of the result.
+/// \param __b12
+/// An 8-bit integral value used to initialize bits [159:152] of the result.
+/// \param __b11
+/// An 8-bit integral value used to initialize bits [167:160] of the result.
+/// \param __b10
+/// An 8-bit integral value used to initialize bits [175:168] of the result.
+/// \param __b09
+/// An 8-bit integral value used to initialize bits [183:176] of the result.
+/// \param __b08
+/// An 8-bit integral value used to initialize bits [191:184] of the result.
+/// \param __b07
+/// An 8-bit integral value used to initialize bits [199:192] of the result.
+/// \param __b06
+/// An 8-bit integral value used to initialize bits [207:200] of the result.
+/// \param __b05
+/// An 8-bit integral value used to initialize bits [215:208] of the result.
+/// \param __b04
+/// An 8-bit integral value used to initialize bits [223:216] of the result.
+/// \param __b03
+/// An 8-bit integral value used to initialize bits [231:224] of the result.
+/// \param __b02
+/// An 8-bit integral value used to initialize bits [239:232] of the result.
+/// \param __b01
+/// An 8-bit integral value used to initialize bits [247:240] of the result.
+/// \param __b00
+/// An 8-bit integral value used to initialize bits [255:248] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
char __b27, char __b26, char __b25, char __b24,
@@ -2635,6 +3978,23 @@ _mm256_setr_epi8(char __b31, char __b30, char __b29, char __b28,
__b07, __b06, __b05, __b04, __b03, __b02, __b01, __b00 };
}
+/// \brief Constructs a 256-bit integer vector, initialized in reverse order
+/// with the specified 64-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKLQDQ+VINSERTF128 </c>
+/// instruction.
+///
+/// \param __a
+/// A 64-bit integral value used to initialize bits [63:0] of the result.
+/// \param __b
+/// A 64-bit integral value used to initialize bits [127:64] of the result.
+/// \param __c
+/// A 64-bit integral value used to initialize bits [191:128] of the result.
+/// \param __d
+/// A 64-bit integral value used to initialize bits [255:192] of the result.
+/// \returns An initialized 256-bit integer vector.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
{
@@ -2642,24 +4002,74 @@ _mm256_setr_epi64x(long long __a, long long __b, long long __c, long long __d)
}
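A hedged illustration of the reverse-order semantics documented above (not part of the patch itself):

/* Illustrative sketch only: in the setr forms the first argument lands in
   the lowest element, so this call is equivalent to
   _mm256_set_epi64x(4, 3, 2, 1). */
__m256i v = _mm256_setr_epi64x(1, 2, 3, 4);  /* bits [63:0] hold 1 */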
/* Create vectors with repeated elements */
+/// \brief Constructs a 256-bit floating-point vector of [4 x double], with each
+/// of the four double-precision floating-point vector elements set to the
+/// specified double-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDDUP+VINSERTF128 </c> instruction.
+///
+/// \param __w
+/// A double-precision floating-point value used to initialize each vector
+/// element of the result.
+/// \returns An initialized 256-bit floating-point vector of [4 x double].
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_set1_pd(double __w)
{
return (__m256d){ __w, __w, __w, __w };
}
+/// \brief Constructs a 256-bit floating-point vector of [8 x float], with each
+/// of the eight single-precision floating-point vector elements set to the
+/// specified single-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPERMILPS+VINSERTF128 </c>
+/// instruction.
+///
+/// \param __w
+/// A single-precision floating-point value used to initialize each vector
+/// element of the result.
+/// \returns An initialized 256-bit floating-point vector of [8 x float].
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_set1_ps(float __w)
{
return (__m256){ __w, __w, __w, __w, __w, __w, __w, __w };
}
+/// \brief Constructs a 256-bit integer vector of [8 x i32], with each of the
+/// 32-bit integral vector elements set to the specified 32-bit integral
+/// value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPERMILPS+VINSERTF128 </c>
+/// instruction.
+///
+/// \param __i
+/// A 32-bit integral value used to initialize each vector element of the
+/// result.
+/// \returns An initialized 256-bit integer vector of [8 x i32].
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi32(int __i)
{
return (__m256i)(__v8si){ __i, __i, __i, __i, __i, __i, __i, __i };
}
+/// \brief Constructs a 256-bit integer vector of [16 x i16], with each of the
+/// 16-bit integral vector elements set to the specified 16-bit integral
+/// value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPSHUFB+VINSERTF128 </c> instruction.
+///
+/// \param __w
+/// A 16-bit integral value used to initialize each vector element of the
+/// result.
+/// \returns An initialized 256-bit integer vector of [16 x i16].
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi16(short __w)
{
@@ -2667,6 +4077,17 @@ _mm256_set1_epi16(short __w)
__w, __w, __w, __w, __w, __w };
}
+/// \brief Constructs a 256-bit integer vector of [32 x i8], with each of the
+/// 8-bit integral vector elements set to the specified 8-bit integral value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPSHUFB+VINSERTF128 </c> instruction.
+///
+/// \param __b
+/// An 8-bit integral value used to initialize each vector element of the
+/// result.
+/// \returns An initialized 256-bit integer vector of [32 x i8].
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi8(char __b)
{
@@ -2675,6 +4096,18 @@ _mm256_set1_epi8(char __b)
__b, __b, __b, __b, __b, __b, __b };
}
+/// \brief Constructs a 256-bit integer vector of [4 x i64], with each of the
+/// 64-bit integral vector elements set to the specified 64-bit integral
+/// value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDDUP+VINSERTF128 </c> instruction.
+///
+/// \param __q
+/// A 64-bit integral value used to initialize each vector element of the
+/// result.
+/// \returns An initialized 256-bit integer vector of [4 x i64].
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_set1_epi64x(long long __q)
{
@@ -2682,18 +4115,41 @@ _mm256_set1_epi64x(long long __q)
}
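A brief hedged sketch of the broadcast behavior the set1 forms share (illustrative, not part of the patch):

__m256i h = _mm256_set1_epi16(7);   /* sixteen 16-bit elements, each 7 */
__m256d d = _mm256_set1_pd(1.5);    /* four doubles, each 1.5 */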
/* Create __zeroed vectors */
+/// \brief Constructs a 256-bit floating-point vector of [4 x double] with all
+/// vector elements initialized to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
+///
+/// \returns A 256-bit vector of [4 x double] with all elements set to zero.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_setzero_pd(void)
{
return (__m256d){ 0, 0, 0, 0 };
}
+/// \brief Constructs a 256-bit floating-point vector of [8 x float] with all
+/// vector elements initialized to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
+///
+/// \returns A 256-bit vector of [8 x float] with all elements set to zero.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_setzero_ps(void)
{
return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
}
+/// \brief Constructs a 256-bit integer vector initialized to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VXORPS </c> instruction.
+///
+/// \returns A 256-bit integer vector initialized to zero.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_setzero_si256(void)
{
@@ -2701,72 +4157,210 @@ _mm256_setzero_si256(void)
}
/* Cast between vector types */
+/// \brief Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
+/// floating-point vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [4 x double].
+/// \returns A 256-bit floating-point vector of [8 x float] containing the same
+/// bitwise pattern as the parameter.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castpd_ps(__m256d __a)
{
return (__m256)__a;
}
+/// \brief Casts a 256-bit floating-point vector of [4 x double] into a 256-bit
+/// integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [4 x double].
+/// \returns A 256-bit integer vector containing the same bitwise pattern as the
+/// parameter.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castpd_si256(__m256d __a)
{
return (__m256i)__a;
}
+/// \brief Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
+/// floating-point vector of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [8 x float].
+/// \returns A 256-bit floating-point vector of [4 x double] containing the same
+/// bitwise pattern as the parameter.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castps_pd(__m256 __a)
{
return (__m256d)__a;
}
+/// \brief Casts a 256-bit floating-point vector of [8 x float] into a 256-bit
+/// integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [8 x float].
+/// \returns A 256-bit integer vector containing the same bitwise pattern as the
+/// parameter.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castps_si256(__m256 __a)
{
return (__m256i)__a;
}
+/// \brief Casts a 256-bit integer vector into a 256-bit floating-point vector
+/// of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \returns A 256-bit floating-point vector of [8 x float] containing the same
+/// bitwise pattern as the parameter.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castsi256_ps(__m256i __a)
{
return (__m256)__a;
}
+/// \brief Casts a 256-bit integer vector into a 256-bit floating-point vector
+/// of [4 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \returns A 256-bit floating-point vector of [4 x double] containing the same
+/// bitwise pattern as the parameter.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castsi256_pd(__m256i __a)
{
return (__m256d)__a;
}
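The cast intrinsics above reinterpret bits rather than convert values; a hedged sketch of the distinction (illustrative only):

__m256d ones = _mm256_set1_pd(1.0);
__m256i raw  = _mm256_castpd_si256(ones);  /* same 256 bits, new type; the
                                              integer elements are not 1 */
__m128i ints = _mm256_cvtpd_epi32(ones);   /* value conversion instead */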
+/// \brief Returns the lower 128 bits of a 256-bit floating-point vector of
+/// [4 x double] as a 128-bit floating-point vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [4 x double].
+/// \returns A 128-bit floating-point vector of [2 x double] containing the
+/// lower 128 bits of the parameter.
static __inline __m128d __DEFAULT_FN_ATTRS
_mm256_castpd256_pd128(__m256d __a)
{
return __builtin_shufflevector((__v4df)__a, (__v4df)__a, 0, 1);
}
+/// \brief Returns the lower 128 bits of a 256-bit floating-point vector of
+/// [8 x float] as a 128-bit floating-point vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit floating-point vector of [8 x float].
+/// \returns A 128-bit floating-point vector of [4 x float] containing the
+/// lower 128 bits of the parameter.
static __inline __m128 __DEFAULT_FN_ATTRS
_mm256_castps256_ps128(__m256 __a)
{
return __builtin_shufflevector((__v8sf)__a, (__v8sf)__a, 0, 1, 2, 3);
}
+/// \brief Truncates a 256-bit integer vector into a 128-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 256-bit integer vector.
+/// \returns A 128-bit integer vector containing the lower 128 bits of the
+/// parameter.
static __inline __m128i __DEFAULT_FN_ATTRS
_mm256_castsi256_si128(__m256i __a)
{
return __builtin_shufflevector((__v4di)__a, (__v4di)__a, 0, 1);
}
+/// \brief Constructs a 256-bit floating-point vector of [4 x double] from a
+/// 128-bit floating-point vector of [2 x double]. The lower 128 bits
+/// contain the value of the source vector. The contents of the upper 128
+/// bits are undefined.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \returns A 256-bit floating-point vector of [4 x double]. The lower 128 bits
+/// contain the value of the parameter. The contents of the upper 128 bits
+/// are undefined.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_castpd128_pd256(__m128d __a)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 1, -1, -1);
}
+/// \brief Constructs a 256-bit floating-point vector of [8 x float] from a
+/// 128-bit floating-point vector of [4 x float]. The lower 128 bits contain
+/// the value of the source vector. The contents of the upper 128 bits are
+/// undefined.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float].
+/// \returns A 256-bit floating-point vector of [8 x float]. The lower 128 bits
+/// contain the value of the parameter. The contents of the upper 128 bits
+/// are undefined.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_castps128_ps256(__m128 __a)
{
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1, 2, 3, -1, -1, -1, -1);
}
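Because the upper 128 bits are undefined after these widening casts, code that needs a zeroed upper half must construct it explicitly; a hedged sketch (illustrative only):

__m128 lo    = _mm_set1_ps(2.0f);
__m256 wide  = _mm256_castps128_ps256(lo);  /* bits [255:128] undefined */
__m256 widez = _mm256_insertf128_ps(_mm256_setzero_ps(), lo, 0);
                                            /* bits [255:128] zeroed */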
+/// \brief Constructs a 256-bit integer vector from a 128-bit integer vector.
+/// The lower 128 bits contain the value of the source vector. The contents
+/// of the upper 128 bits are undefined.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \returns A 256-bit integer vector. The lower 128 bits contain the value of
+/// the parameter. The contents of the upper 128 bits are undefined.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_castsi128_si256(__m128i __a)
{
@@ -2778,6 +4372,38 @@ _mm256_castsi128_si256(__m128i __a)
We use macros rather than inlines because we only want to accept
invocations where the immediate M is a constant expression.
*/
+/// \brief Constructs a new 256-bit vector of [8 x float] by first duplicating
+/// a 256-bit vector of [8 x float] given in the first parameter, and then
+/// replacing either the upper or the lower 128 bits with the contents of a
+/// 128-bit vector of [4 x float] in the second parameter. The immediate
+/// integer parameter selects between the upper and the lower 128 bits.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256 _mm256_insertf128_ps(__m256 V1, __m128 V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param V1
+/// A 256-bit vector of [8 x float]. This vector is copied to the result
+/// first, and then either the upper or the lower 128 bits of the result will
+/// be replaced by the contents of \a V2.
+/// \param V2
+/// A 128-bit vector of [4 x float]. The contents of this parameter are
+/// written to either the upper or the lower 128 bits of the result depending
+/// on the value of parameter \a M.
+/// \param M
+/// An immediate integer. The least significant bit determines how the values
+/// from the two parameters are interleaved: \n
+/// If bit [0] of \a M is 0, \a V2 is copied to bits [127:0] of the result,
+/// and bits [255:128] of \a V1 are copied to bits [255:128] of the
+/// result. \n
+/// If bit [0] of \a M is 1, \a V2 is copied to bits [255:128] of the
+/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
+/// result.
+/// \returns A 256-bit vector of [8 x float] containing the interleaved values.
#define _mm256_insertf128_ps(V1, V2, M) __extension__ ({ \
(__m256)__builtin_shufflevector( \
(__v8sf)(__m256)(V1), \
@@ -2791,6 +4417,38 @@ _mm256_castsi128_si256(__m128i __a)
(((M) & 1) ? 10 : 6), \
(((M) & 1) ? 11 : 7) );})
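A hedged usage sketch (illustrative only); because this is a macro rather than an inline function, M must be a constant expression:

__m256 base = _mm256_setzero_ps();
__m128 part = _mm_set1_ps(1.0f);
__m256 r = _mm256_insertf128_ps(base, part, 1);  /* 1.0f in bits [255:128],
                                                    zeros in bits [127:0] */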
+/// \brief Constructs a new 256-bit vector of [4 x double] by first duplicating
+/// a 256-bit vector of [4 x double] given in the first parameter, and then
+/// replacing either the upper or the lower 128 bits with the contents of a
+/// 128-bit vector of [2 x double] in the second parameter. The immediate
+/// integer parameter selects between the upper and the lower 128 bits.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256d _mm256_insertf128_pd(__m256d V1, __m128d V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param V1
+/// A 256-bit vector of [4 x double]. This vector is copied to the result
+/// first, and then either the upper or the lower 128 bits of the result will
+/// be replaced by the contents of \a V2.
+/// \param V2
+/// A 128-bit vector of [2 x double]. The contents of this parameter are
+/// written to either the upper or the lower 128 bits of the result depending
+/// on the value of parameter \a M.
+/// \param M
+/// An immediate integer. The least significant bit determines how the values
+/// from the two parameters are interleaved: \n
+/// If bit [0] of \a M is 0, \a V2 is copied to bits [127:0] of the result,
+/// and bits [255:128] of \a V1 are copied to bits [255:128] of the
+/// result. \n
+/// If bit [0] of \a M is 1, \a V2 is copied to bits [255:128] of the
+/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
+/// result.
+/// \returns A 256-bit vector of [4 x double] containing the interleaved values.
#define _mm256_insertf128_pd(V1, V2, M) __extension__ ({ \
(__m256d)__builtin_shufflevector( \
(__v4df)(__m256d)(V1), \
@@ -2800,6 +4458,38 @@ _mm256_castsi128_si256(__m128i __a)
(((M) & 1) ? 4 : 2), \
(((M) & 1) ? 5 : 3) );})
+/// \brief Constructs a new 256-bit integer vector by first duplicating a
+/// 256-bit integer vector given in the first parameter, and then replacing
+/// either the upper or the lower 128 bits with the contents of a 128-bit
+/// integer vector in the second parameter. The immediate integer parameter
+/// selects between the upper and the lower 128 bits.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m256i _mm256_insertf128_si256(__m256i V1, __m128i V2, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param V1
+/// A 256-bit integer vector. This vector is copied to the result first, and
+/// then either the upper or the lower 128 bits of the result will be
+/// replaced by the contents of \a V2.
+/// \param V2
+/// A 128-bit integer vector. The contents of this parameter are written to
+/// either the upper or the lower 128 bits of the result depending on the
+/// value of parameter \a M.
+/// \param M
+/// An immediate integer. The least significant bit determines how the values
+/// from the two parameters are interleaved: \n
+/// If bit [0] of \a M is 0, \a V2 is copied to bits [127:0] of the result,
+/// and bits [255:128] of \a V1 are copied to bits [255:128] of the
+/// result. \n
+/// If bit [0] of \a M is 1, \a V2 is copied to bits [255:128] of the
+/// result, and bits [127:0] of \a V1 are copied to bits [127:0] of the
+/// result.
+/// \returns A 256-bit integer vector containing the interleaved values.
#define _mm256_insertf128_si256(V1, V2, M) __extension__ ({ \
(__m256i)__builtin_shufflevector( \
(__v4di)(__m256i)(V1), \
@@ -2814,6 +4504,27 @@ _mm256_castsi128_si256(__m128i __a)
We use macros rather than inlines because we only want to accept
invocations where the immediate M is a constant expression.
*/
+/// \brief Extracts either the upper or the lower 128 bits from a 256-bit vector
+/// of [8 x float], as determined by the immediate integer parameter, and
+/// returns the extracted bits as a 128-bit vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128 _mm256_extractf128_ps(__m256 V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction.
+///
+/// \param V
+/// A 256-bit vector of [8 x float].
+/// \param M
+/// An immediate integer. The least significant bit determines which bits are
+/// extracted from the first parameter: \n
+/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the
+/// result. \n
+/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
+/// \returns A 128-bit vector of [4 x float] containing the extracted bits.
#define _mm256_extractf128_ps(V, M) __extension__ ({ \
(__m128)__builtin_shufflevector( \
(__v8sf)(__m256)(V), \
@@ -2823,6 +4534,27 @@ _mm256_castsi128_si256(__m128i __a)
(((M) & 1) ? 6 : 2), \
(((M) & 1) ? 7 : 3) );})
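A hedged sketch of the immediate's effect (illustrative only):

__m256 v  = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
__m128 lo = _mm256_extractf128_ps(v, 0);  /* {0, 1, 2, 3} */
__m128 hi = _mm256_extractf128_ps(v, 1);  /* {4, 5, 6, 7} */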
+/// \brief Extracts either the upper or the lower 128 bits from a 256-bit vector
+/// of [4 x double], as determined by the immediate integer parameter, and
+/// returns the extracted bits as a 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm256_extractf128_pd(__m256d V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction.
+///
+/// \param V
+/// A 256-bit vector of [4 x double].
+/// \param M
+/// An immediate integer. The least significant bit determines which bits are
+/// extracted from the first parameter: \n
+/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the
+/// result. \n
+/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
+/// \returns A 128-bit vector of [2 x double] containing the extracted bits.
#define _mm256_extractf128_pd(V, M) __extension__ ({ \
(__m128d)__builtin_shufflevector( \
(__v4df)(__m256d)(V), \
@@ -2830,6 +4562,27 @@ _mm256_castsi128_si256(__m128i __a)
(((M) & 1) ? 2 : 0), \
(((M) & 1) ? 3 : 1) );})
+/// \brief Extracts either the upper or the lower 128 bits from a 256-bit
+/// integer vector, as determined by the immediate integer parameter, and
+/// returns the extracted bits as a 128-bit integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm256_extractf128_si256(__m256i V, const int M);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction.
+///
+/// \param V
+/// A 256-bit integer vector.
+/// \param M
+/// An immediate integer. The least significant bit determines which bits are
+/// extracted from the first parameter: \n
+/// If bit [0] of \a M is 0, bits [127:0] of \a V are copied to the
+/// result. \n
+/// If bit [0] of \a M is 1, bits [255:128] of \a V are copied to the result.
+/// \returns A 128-bit integer vector containing the extracted bits.
#define _mm256_extractf128_si256(V, M) __extension__ ({ \
(__m128i)__builtin_shufflevector( \
(__v4di)(__m256i)(V), \
@@ -2838,6 +4591,27 @@ _mm256_castsi128_si256(__m128i __a)
(((M) & 1) ? 3 : 1) );})
/* SIMD load ops (unaligned) */
+/// \brief Loads two 128-bit floating-point vectors of [4 x float] from
+/// unaligned memory locations and constructs a 256-bit floating-point vector
+/// of [8 x float] by concatenating the two 128-bit vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to load instructions followed by the
+/// <c> VINSERTF128 </c> instruction.
+///
+/// \param __addr_hi
+/// A pointer to a 128-bit memory location containing 4 consecutive
+/// single-precision floating-point values. These values are to be copied to
+/// bits[255:128] of the result. The address of the memory location does not
+/// have to be aligned.
+/// \param __addr_lo
+/// A pointer to a 128-bit memory location containing 4 consecutive
+/// single-precision floating-point values. These values are to be copied to
+/// bits[127:0] of the result. The address of the memory location does not
+/// have to be aligned.
+/// \returns A 256-bit floating-point vector of [8 x float] containing the
+/// concatenated result.
static __inline __m256 __DEFAULT_FN_ATTRS
_mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
{
@@ -2845,6 +4619,27 @@ _mm256_loadu2_m128(float const *__addr_hi, float const *__addr_lo)
return _mm256_insertf128_ps(__v256, _mm_loadu_ps(__addr_hi), 1);
}
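Note the argument order, high address first; a hedged sketch with array names invented for illustration:

float lo[4] = {0, 1, 2, 3}, hi[4] = {4, 5, 6, 7};  /* no alignment required */
__m256 v = _mm256_loadu2_m128(hi, lo);  /* elements 0..7, low to high */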
+/// \brief Loads two 128-bit floating-point vectors of [2 x double] from
+/// unaligned memory locations and constructs a 256-bit floating-point vector
+/// of [4 x double] by concatenating the two 128-bit vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to load instructions followed by the
+/// <c> VINSERTF128 </c> instruction.
+///
+/// \param __addr_hi
+/// A pointer to a 128-bit memory location containing two consecutive
+/// double-precision floating-point values. These values are to be copied to
+/// bits[255:128] of the result. The address of the memory location does not
+/// have to be aligned.
+/// \param __addr_lo
+/// A pointer to a 128-bit memory location containing two consecutive
+/// double-precision floating-point values. These values are to be copied to
+/// bits[127:0] of the result. The address of the memory location does not
+/// have to be aligned.
+/// \returns A 256-bit floating-point vector of [4 x double] containing the
+/// concatenated result.
static __inline __m256d __DEFAULT_FN_ATTRS
_mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
{
@@ -2852,6 +4647,24 @@ _mm256_loadu2_m128d(double const *__addr_hi, double const *__addr_lo)
return _mm256_insertf128_pd(__v256, _mm_loadu_pd(__addr_hi), 1);
}
+/// \brief Loads two 128-bit integer vectors from unaligned memory locations and
+/// constructs a 256-bit integer vector by concatenating the two 128-bit
+/// vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to load instructions followed by the
+/// <c> VINSERTF128 </c> instruction.
+///
+/// \param __addr_hi
+/// A pointer to a 128-bit memory location containing a 128-bit integer
+/// vector. This vector is to be copied to bits[255:128] of the result. The
+/// address of the memory location does not have to be aligned.
+/// \param __addr_lo
+/// A pointer to a 128-bit memory location containing a 128-bit integer
+/// vector. This vector is to be copied to bits[127:0] of the result. The
+/// address of the memory location does not have to be aligned.
+/// \returns A 256-bit integer vector containing the concatenated result.
static __inline __m256i __DEFAULT_FN_ATTRS
_mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
{
@@ -2860,6 +4673,24 @@ _mm256_loadu2_m128i(__m128i const *__addr_hi, __m128i const *__addr_lo)
}
/* SIMD store ops (unaligned) */
+/// \brief Stores the upper and lower 128 bits of a 256-bit floating-point
+/// vector of [8 x float] into two different unaligned memory locations.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction and the
+/// store instructions.
+///
+/// \param __addr_hi
+/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be
+/// copied to this memory location. The address of this memory location does
+/// not have to be aligned.
+/// \param __addr_lo
+/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be
+/// copied to this memory location. The address of this memory location does
+/// not have to be aligned.
+/// \param __a
+/// A 256-bit floating-point vector of [8 x float].
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
{
@@ -2871,6 +4702,24 @@ _mm256_storeu2_m128(float *__addr_hi, float *__addr_lo, __m256 __a)
_mm_storeu_ps(__addr_hi, __v128);
}
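The store direction mirrors the load above; a hedged sketch with invented names:

float lo[4], hi[4];
__m256 v = _mm256_setr_ps(0, 1, 2, 3, 4, 5, 6, 7);
_mm256_storeu2_m128(hi, lo, v);  /* lo = {0,1,2,3}, hi = {4,5,6,7} */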
+/// \brief Stores the upper and lower 128 bits of a 256-bit floating-point
+/// vector of [4 x double] into two different unaligned memory locations.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction and the
+/// store instructions.
+///
+/// \param __addr_hi
+/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be
+/// copied to this memory location. The address of this memory location does
+/// not have to be aligned.
+/// \param __addr_lo
+/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be
+/// copied to this memory location. The address of this memory location does
+/// not have to be aligned.
+/// \param __a
+/// A 256-bit floating-point vector of [4 x double].
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
{
@@ -2882,6 +4731,24 @@ _mm256_storeu2_m128d(double *__addr_hi, double *__addr_lo, __m256d __a)
_mm_storeu_pd(__addr_hi, __v128);
}
+/// \brief Stores the upper and lower 128 bits of a 256-bit integer vector into
+/// two different unaligned memory locations.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VEXTRACTF128 </c> instruction and the
+/// store instructions.
+///
+/// \param __addr_hi
+/// A pointer to a 128-bit memory location. Bits[255:128] of \a __a are to be
+/// copied to this memory location. The address of this memory location does
+/// not have to be aligned.
+/// \param __addr_lo
+/// A pointer to a 128-bit memory location. Bits[127:0] of \a __a are to be
+/// copied to this memory location. The address of this memory location does
+/// not have to be aligned.
+/// \param __a
+/// A 256-bit integer vector.
static __inline void __DEFAULT_FN_ATTRS
_mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
{
@@ -2893,33 +4760,132 @@ _mm256_storeu2_m128i(__m128i *__addr_hi, __m128i *__addr_lo, __m256i __a)
_mm_storeu_si128(__addr_hi, __v128);
}
+/// \brief Constructs a 256-bit floating-point vector of [8 x float] by
+/// concatenating two 128-bit floating-point vectors of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __hi
+/// A 128-bit floating-point vector of [4 x float] to be copied to the upper
+/// 128 bits of the result.
+/// \param __lo
+/// A 128-bit floating-point vector of [4 x float] to be copied to the lower
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [8 x float] containing the
+/// concatenated result.
static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_set_m128 (__m128 __hi, __m128 __lo) {
+_mm256_set_m128 (__m128 __hi, __m128 __lo)
+{
return (__m256) __builtin_shufflevector((__v4sf)__lo, (__v4sf)__hi, 0, 1, 2, 3, 4, 5, 6, 7);
}
+/// \brief Constructs a 256-bit floating-point vector of [4 x double] by
+/// concatenating two 128-bit floating-point vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __hi
+/// A 128-bit floating-point vector of [2 x double] to be copied to the upper
+/// 128 bits of the result.
+/// \param __lo
+/// A 128-bit floating-point vector of [2 x double] to be copied to the lower
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [4 x double] containing the
+/// concatenated result.
static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_set_m128d (__m128d __hi, __m128d __lo) {
+_mm256_set_m128d (__m128d __hi, __m128d __lo)
+{
return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}
+/// \brief Constructs a 256-bit integer vector by concatenating two 128-bit
+/// integer vectors.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __hi
+/// A 128-bit integer vector to be copied to the upper 128 bits of the
+/// result.
+/// \param __lo
+/// A 128-bit integer vector to be copied to the lower 128 bits of the
+/// result.
+/// \returns A 256-bit integer vector containing the concatenated result.
static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_set_m128i (__m128i __hi, __m128i __lo) {
+_mm256_set_m128i (__m128i __hi, __m128i __lo)
+{
return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}
+/// \brief Constructs a 256-bit floating-point vector of [8 x float] by
+/// concatenating two 128-bit floating-point vectors of [4 x float]. This is
+/// similar to _mm256_set_m128, but the order of the input parameters is
+/// swapped.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __lo
+/// A 128-bit floating-point vector of [4 x float] to be copied to the lower
+/// 128 bits of the result.
+/// \param __hi
+/// A 128-bit floating-point vector of [4 x float] to be copied to the upper
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [8 x float] containing the
+/// concatenated result.
static __inline __m256 __DEFAULT_FN_ATTRS
-_mm256_setr_m128 (__m128 __lo, __m128 __hi) {
+_mm256_setr_m128 (__m128 __lo, __m128 __hi)
+{
return _mm256_set_m128(__hi, __lo);
}
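A hedged sketch of the parameter-order difference (illustrative only):

__m128 a = _mm_set1_ps(1.0f), b = _mm_set1_ps(2.0f);
__m256 v1 = _mm256_set_m128(b, a);   /* arguments are (hi, lo) */
__m256 v2 = _mm256_setr_m128(a, b);  /* arguments are (lo, hi); same result */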
+/// \brief Constructs a 256-bit floating-point vector of [4 x double] by
+/// concatenating two 128-bit floating-point vectors of [2 x double]. This is
+/// similar to _mm256_set_m128d, but the order of the input parameters is
+/// swapped.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __lo
+/// A 128-bit floating-point vector of [2 x double] to be copied to the lower
+/// 128 bits of the result.
+/// \param __hi
+/// A 128-bit floating-point vector of [2 x double] to be copied to the upper
+/// 128 bits of the result.
+/// \returns A 256-bit floating-point vector of [4 x double] containing the
+/// concatenated result.
static __inline __m256d __DEFAULT_FN_ATTRS
-_mm256_setr_m128d (__m128d __lo, __m128d __hi) {
+_mm256_setr_m128d (__m128d __lo, __m128d __hi)
+{
return (__m256d)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}
+/// \brief Constructs a 256-bit integer vector by concatenating two 128-bit
+/// integer vectors. This is similar to _mm256_set_m128i, but the order of
+/// the input parameters is swapped.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VINSERTF128 </c> instruction.
+///
+/// \param __lo
+/// A 128-bit integer vector to be copied to the lower 128 bits of the
+/// result.
+/// \param __hi
+/// A 128-bit integer vector to be copied to the upper 128 bits of the
+/// result.
+/// \returns A 256-bit integer vector containing the concatenated result.
static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setr_m128i (__m128i __lo, __m128i __hi) {
+_mm256_setr_m128i (__m128i __lo, __m128i __hi)
+{
return (__m256i)_mm256_set_m128((__m128)__hi, (__m128)__lo);
}
diff --git a/lib/Headers/bmiintrin.h b/lib/Headers/bmiintrin.h
index 30acfaeb9f3b..488eb2dbd3d4 100644
--- a/lib/Headers/bmiintrin.h
+++ b/lib/Headers/bmiintrin.h
@@ -36,7 +36,7 @@
/// unsigned short _tzcnt_u16(unsigned short a);
/// \endcode
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param a
/// An unsigned 16-bit integer whose trailing zeros are to be counted.
@@ -53,7 +53,7 @@
/// unsigned int _andn_u32(unsigned int a, unsigned int b);
/// \endcode
///
-/// This intrinsic corresponds to the \c ANDN instruction.
+/// This intrinsic corresponds to the <c> ANDN </c> instruction.
///
/// \param a
/// An unsigned integer containing one of the operands.
@@ -73,7 +73,7 @@
/// unsigned int _blsi_u32(unsigned int a);
/// \endcode
///
-/// This intrinsic corresponds to the \c BLSI instruction.
+/// This intrinsic corresponds to the <c> BLSI </c> instruction.
///
/// \param a
/// An unsigned integer whose bits are to be cleared.
@@ -91,7 +91,7 @@
/// unsigned int _blsmsk_u32(unsigned int a);
/// \endcode
///
-/// This intrinsic corresponds to the \c BLSMSK instruction.
+/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
///
/// \param a
/// An unsigned integer used to create the mask.
@@ -107,7 +107,7 @@
/// unsigned int _blsr_u32(unsigned int a);
/// \endcode
///
-/// This intrinsic corresponds to the \c BLSR instruction.
+/// This intrinsic corresponds to the <c> BLSR </c> instruction.
///
/// \param a
/// An unsigned integer containing the operand to be cleared.
@@ -123,7 +123,7 @@
/// unsigned int _tzcnt_u32(unsigned int a);
/// \endcode
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param a
/// An unsigned 32-bit integer whose trailing zeros are to be counted.
@@ -143,7 +143,7 @@
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param __X
/// An unsigned 16-bit integer whose trailing zeros are to be counted.
@@ -160,7 +160,7 @@ __tzcnt_u16(unsigned short __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c ANDN instruction.
+/// This intrinsic corresponds to the <c> ANDN </c> instruction.
///
/// \param __X
/// An unsigned integer containing one of the operands.
@@ -180,7 +180,7 @@ __andn_u32(unsigned int __X, unsigned int __Y)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BEXTR instruction.
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
///
/// \param __X
/// An unsigned integer whose bits are to be extracted.
@@ -202,7 +202,7 @@ __bextr_u32(unsigned int __X, unsigned int __Y)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BEXTR instruction.
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
///
/// \param __X
/// An unsigned integer whose bits are to be extracted.
@@ -225,7 +225,7 @@ _bextr_u32(unsigned int __X, unsigned int __Y, unsigned int __Z)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BLSI instruction.
+/// This intrinsic corresponds to the <c> BLSI </c> instruction.
///
/// \param __X
/// An unsigned integer whose bits are to be cleared.
@@ -243,7 +243,7 @@ __blsi_u32(unsigned int __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BLSMSK instruction.
+/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
///
/// \param __X
/// An unsigned integer used to create the mask.
@@ -259,7 +259,7 @@ __blsmsk_u32(unsigned int __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BLSR instruction.
+/// This intrinsic corresponds to the <c> BLSR </c> instruction.
///
/// \param __X
/// An unsigned integer containing the operand to be cleared.
@@ -275,7 +275,7 @@ __blsr_u32(unsigned int __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param __X
/// An unsigned 32-bit integer whose trailing zeros are to be counted.
@@ -291,12 +291,12 @@ __tzcnt_u32(unsigned int __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param __X
/// An unsigned 32-bit integer whose trailing zeros are to be counted.
-/// \returns An 32-bit integer containing the number of trailing zero
-/// bits in the operand.
+/// \returns A 32-bit integer containing the number of trailing zero bits in
+/// the operand.
static __inline__ int __RELAXED_FN_ATTRS
_mm_tzcnt_32(unsigned int __X)
{
@@ -314,7 +314,7 @@ _mm_tzcnt_32(unsigned int __X)
/// unsigned long long _andn_u64 (unsigned long long a, unsigned long long b);
/// \endcode
///
-/// This intrinsic corresponds to the \c ANDN instruction.
+/// This intrinsic corresponds to the <c> ANDN </c> instruction.
///
/// \param a
/// An unsigned 64-bit integer containing one of the operands.
@@ -334,7 +334,7 @@ _mm_tzcnt_32(unsigned int __X)
/// unsigned long long _blsi_u64(unsigned long long a);
/// \endcode
///
-/// This intrinsic corresponds to the \c BLSI instruction.
+/// This intrinsic corresponds to the <c> BLSI </c> instruction.
///
/// \param a
/// An unsigned 64-bit integer whose bits are to be cleared.
@@ -352,7 +352,7 @@ _mm_tzcnt_32(unsigned int __X)
/// unsigned long long _blsmsk_u64(unsigned long long a);
/// \endcode
///
-/// This intrinsic corresponds to the \c BLSMSK instruction.
+/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
///
/// \param a
/// An unsigned 64-bit integer used to create the mask.
@@ -368,7 +368,7 @@ _mm_tzcnt_32(unsigned int __X)
/// unsigned long long _blsr_u64(unsigned long long a);
/// \endcode
///
-/// This intrinsic corresponds to the \c BLSR instruction.
+/// This intrinsic corresponds to the <c> BLSR </c> instruction.
///
/// \param a
/// An unsigned 64-bit integer containing the operand to be cleared.
@@ -384,7 +384,7 @@ _mm_tzcnt_32(unsigned int __X)
/// unsigned long long _tzcnt_u64(unsigned long long a);
/// \endcode
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param a
/// An unsigned 64-bit integer whose trailing zeros are to be counted.
@@ -397,7 +397,7 @@ _mm_tzcnt_32(unsigned int __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c ANDN instruction.
+/// This intrinsic corresponds to the <c> ANDN </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer containing one of the operands.
@@ -417,7 +417,7 @@ __andn_u64 (unsigned long long __X, unsigned long long __Y)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BEXTR instruction.
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose bits are to be extracted.
@@ -439,7 +439,7 @@ __bextr_u64(unsigned long long __X, unsigned long long __Y)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BEXTR instruction.
+/// This intrinsic corresponds to the <c> BEXTR </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose bits are to be extracted.
@@ -462,7 +462,7 @@ _bextr_u64(unsigned long long __X, unsigned int __Y, unsigned int __Z)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BLSI instruction.
+/// This intrinsic corresponds to the <c> BLSI </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose bits are to be cleared.
@@ -480,7 +480,7 @@ __blsi_u64(unsigned long long __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BLSMSK instruction.
+/// This intrinsic corresponds to the <c> BLSMSK </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer used to create the mask.
@@ -496,7 +496,7 @@ __blsmsk_u64(unsigned long long __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c BLSR instruction.
+/// This intrinsic corresponds to the <c> BLSR </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer containing the operand to be cleared.
@@ -512,7 +512,7 @@ __blsr_u64(unsigned long long __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose trailing zeros are to be counted.
@@ -528,12 +528,12 @@ __tzcnt_u64(unsigned long long __X)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c TZCNT instruction.
+/// This intrinsic corresponds to the <c> TZCNT </c> instruction.
///
/// \param __X
/// An unsigned 64-bit integer whose trailing zeros are to be counted.
-/// \returns An 64-bit integer containing the number of trailing zero
-/// bits in the operand.
+/// \returns A 64-bit integer containing the number of trailing zero bits in
+/// the operand.
static __inline__ long long __RELAXED_FN_ATTRS
_mm_tzcnt_64(unsigned long long __X)
{
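To ground the BMI semantics documented throughout this file, a hedged sketch on a 32-bit operand (illustrative only, placed at the end of this excerpt):

unsigned x = 0x28u;           /* binary 10 1000 */
unsigned t = __tzcnt_u32(x);  /* 3: trailing zero bits */
unsigned i = __blsi_u32(x);   /* 0x08: lowest set bit isolated */
unsigned m = __blsmsk_u32(x); /* 0x0F: mask up through the lowest set bit */
unsigned r = __blsr_u32(x);   /* 0x20: lowest set bit cleared */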
diff --git a/lib/Headers/cuda_wrappers/algorithm b/lib/Headers/cuda_wrappers/algorithm
new file mode 100644
index 000000000000..95d9beb73c68
--- /dev/null
+++ b/lib/Headers/cuda_wrappers/algorithm
@@ -0,0 +1,96 @@
+/*===---- algorithm - CUDA wrapper for <algorithm> --------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_WRAPPERS_ALGORITHM
+#define __CLANG_CUDA_WRAPPERS_ALGORITHM
+
+// This header defines __device__ overloads of std::min/max, but only if we're
+// <= C++11. In C++14, these functions are constexpr, and so are implicitly
+// __host__ __device__.
+//
+// We don't support the initializer_list overloads because
+// initializer_list::begin() and end() are not __host__ __device__ functions.
+//
+// When compiling in C++14 mode, we could force std::min/max to have different
+// implementations for host and device, by declaring the device overloads
+// before the constexpr overloads appear. We choose not to do this because
+//
+// a) why write our own implementation when we can use one from the standard
+// library? and
+// b) libstdc++ is evil and declares min/max inside a header that is included
+// *before* we include <algorithm>. So we'd have to unconditionally
+// declare our __device__ overloads of min/max, but that would pollute
+// things for people who choose not to include <algorithm>.
+
+#include_next <algorithm>
+
+#if __cplusplus <= 201103L
+
+// We need to define these overloads in exactly the namespace our standard
+// library uses (including the right inline namespace), otherwise they won't be
+// picked up by other functions in the standard library (e.g. functions in
+// <complex>). Thus the ugliness below.
+#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
+_LIBCPP_BEGIN_NAMESPACE_STD
+#else
+namespace std {
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif
+#endif
+
+template <class __T, class __Cmp>
+inline __device__ const __T &
+max(const __T &__a, const __T &__b, __Cmp __cmp) {
+ return __cmp(__a, __b) ? __b : __a;
+}
+
+template <class __T>
+inline __device__ const __T &
+max(const __T &__a, const __T &__b) {
+ return __a < __b ? __b : __a;
+}
+
+template <class __T, class __Cmp>
+inline __device__ const __T &
+min(const __T &__a, const __T &__b, __Cmp __cmp) {
+ return __cmp(__b, __a) ? __b : __a;
+}
+
+template <class __T>
+inline __device__ const __T &
+min(const __T &__a, const __T &__b) {
+ return __a < __b ? __b : __a;
+}
+
+#ifdef _LIBCPP_END_NAMESPACE_STD
+_LIBCPP_END_NAMESPACE_STD
+#else
+#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
+_GLIBCXX_END_NAMESPACE_VERSION
+#endif
+} // namespace std
+#endif
+
+#endif // __cplusplus <= 201103L
+#endif // __CLANG_CUDA_WRAPPERS_ALGORITHM
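A hedged usage sketch (not part of the patch; the function name is invented): with this wrapper in place, pre-C++14 device code can call std::min and std::max directly.

__device__ int clamp_to_byte(int v) {
  // Resolves to the __device__ overloads defined above.
  return std::max(0, std::min(v, 255));
}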
diff --git a/lib/Headers/cuda_wrappers/complex b/lib/Headers/cuda_wrappers/complex
new file mode 100644
index 000000000000..11d40a82a8f6
--- /dev/null
+++ b/lib/Headers/cuda_wrappers/complex
@@ -0,0 +1,82 @@
+/*===---- complex - CUDA wrapper for <complex> ------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_WRAPPERS_COMPLEX
+#define __CLANG_CUDA_WRAPPERS_COMPLEX
+
+// Wrapper around <complex> that forces its functions to be __host__
+// __device__.
+
+// First, include host-only headers we think are likely to be included by
+// <complex>, so that the pragma below only applies to <complex> itself.
+#if __cplusplus >= 201103L
+#include <type_traits>
+#endif
+#include <stdexcept>
+#include <cmath>
+#include <sstream>
+
+// Next, include our <algorithm> wrapper, to ensure that device overloads of
+// std::min/max are available.
+#include <algorithm>
+
+#pragma clang force_cuda_host_device begin
+
+// When compiling for device, ask libstdc++ to use its own implementations of
+// complex functions, rather than calling builtins (which resolve to library
+// functions that don't exist when compiling CUDA device code).
+//
+// This is a little dicey, because it causes libstdc++ to define a different
+// set of overloads on host and device.
+//
+// // Present only when compiling for host.
+// __host__ __device__ complex<float> sin(const complex<float>& x) {
+// return __builtin_csinf(x);
+// }
+//
+// // Present when compiling for host and for device.
+// template <typename T>
+// __host__ __device__ complex<T> sin(const complex<T>& x) {
+// return complex<T>(sin(x.real()) * cosh(x.imag()),
+// cos(x.real()) * sinh(x.imag()));
+// }
+//
+// This is safe because when compiling for device, all function calls in
+// __host__ code to sin() will still resolve to *something*, even if they don't
+// resolve to the same function as they resolve to when compiling for host. We
+// don't care that they don't resolve to the right function because we won't
+// codegen this host code when compiling for device.
+
+#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX")
+#pragma push_macro("_GLIBCXX_USE_C99_COMPLEX_TR1")
+#define _GLIBCXX_USE_C99_COMPLEX 0
+#define _GLIBCXX_USE_C99_COMPLEX_TR1 0
+
+#include_next <complex>
+
+#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX_TR1")
+#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX")
+
+#pragma clang force_cuda_host_device end
+
+#endif // include guard
diff --git a/lib/Headers/cuda_wrappers/new b/lib/Headers/cuda_wrappers/new
new file mode 100644
index 000000000000..b77131af0e5b
--- /dev/null
+++ b/lib/Headers/cuda_wrappers/new
@@ -0,0 +1,47 @@
+/*===---- new - CUDA wrapper for <new> ----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __CLANG_CUDA_WRAPPERS_NEW
+#define __CLANG_CUDA_WRAPPERS_NEW
+
+#include_next <new>
+
+// Device overrides for placement new and delete.
+#pragma push_macro("CUDA_NOEXCEPT")
+#if __cplusplus >= 201103L
+#define CUDA_NOEXCEPT noexcept
+#else
+#define CUDA_NOEXCEPT
+#endif
+
+__device__ inline void *operator new(__SIZE_TYPE__, void *__ptr) CUDA_NOEXCEPT {
+ return __ptr;
+}
+__device__ inline void *operator new[](__SIZE_TYPE__, void *__ptr) CUDA_NOEXCEPT {
+ return __ptr;
+}
+__device__ inline void operator delete(void *, void *) CUDA_NOEXCEPT {}
+__device__ inline void operator delete[](void *, void *) CUDA_NOEXCEPT {}
+#pragma pop_macro("CUDA_NOEXCEPT")
+
+#endif // include guard
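A hedged usage sketch (not part of the patch; the function name is invented): these overloads let device code use placement new.

__device__ int *emplace_int(void *buf) {
  // Resolves to the __device__ placement operator new defined above.
  return new (buf) int(42);
}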
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index 70d6d726110a..1512f9f0b47b 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -49,6 +49,21 @@ typedef signed char __v16qs __attribute__((__vector_size__(16)));
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+/// \brief Adds the lower double-precision values of both operands and returns the
+/// sum in the lower 64 bits of the result. The upper 64 bits of the result
+/// are copied from the upper double-precision value of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VADDSD / ADDSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// sum of the lower 64 bits of both operands. The upper 64 bits are copied
+/// from the upper 64 bits of the first source operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_add_sd(__m128d __a, __m128d __b)
{
@@ -56,12 +71,41 @@ _mm_add_sd(__m128d __a, __m128d __b)
return __a;
}
+/// \brief Adds two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VADDPD / ADDPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \returns A 128-bit vector of [2 x double] containing the sums of both
+/// operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_add_pd(__m128d __a, __m128d __b)
{
return (__m128d)((__v2df)__a + (__v2df)__b);
}
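A hedged sketch of the scalar/packed distinction between the two intrinsics above (illustrative only):

__m128d a = _mm_setr_pd(1.0, 10.0);
__m128d b = _mm_setr_pd(2.0, 20.0);
__m128d s = _mm_add_sd(a, b);  /* {3.0, 10.0}: upper element kept from a */
__m128d p = _mm_add_pd(a, b);  /* {3.0, 30.0}: both elements added */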
+/// \brief Subtracts the lower double-precision value of the second operand
+/// from the lower double-precision value of the first operand and returns
+/// the difference in the lower 64 bits of the result. The upper 64 bits of
+/// the result are copied from the upper double-precision value of the first
+/// operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VSUBSD / SUBSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the minuend.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing the subtrahend.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// difference of the lower 64 bits of both operands. The upper 64 bits are
+/// copied from the upper 64 bits of the first source operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_sub_sd(__m128d __a, __m128d __b)
{
@@ -69,12 +113,40 @@ _mm_sub_sd(__m128d __a, __m128d __b)
return __a;
}
+/// \brief Subtracts two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VSUBPD / SUBPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the minuend.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing the subtrahend.
+/// \returns A 128-bit vector of [2 x double] containing the differences between
+/// both operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_sub_pd(__m128d __a, __m128d __b)
{
return (__m128d)((__v2df)__a - (__v2df)__b);
}
+/// \brief Multiplies lower double-precision values in both operands and returns
+/// the product in the lower 64 bits of the result. The upper 64 bits of the
+/// result are copied from the upper double-precision value of the first
+/// operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMULSD / MULSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// product of the lower 64 bits of both operands. The upper 64 bits are
+/// copied from the upper 64 bits of the first source operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mul_sd(__m128d __a, __m128d __b)
{
@@ -82,12 +154,41 @@ _mm_mul_sd(__m128d __a, __m128d __b)
return __a;
}
+/// \brief Multiplies two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMULPD / MULPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the operands.
+/// \returns A 128-bit vector of [2 x double] containing the products of both
+/// operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_mul_pd(__m128d __a, __m128d __b)
{
return (__m128d)((__v2df)__a * (__v2df)__b);
}
+/// \brief Divides the lower double-precision value of the first operand by the
+/// lower double-precision value of the second operand and returns the
+/// quotient in the lower 64 bits of the result. The upper 64 bits of the
+/// result are copied from the upper double-precision value of the first
+/// operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDIVSD / DIVSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the dividend.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing the divisor.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// quotient of the lower 64 bits of both operands. The upper 64 bits are
+/// copied from the upper 64 bits of the first source operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_div_sd(__m128d __a, __m128d __b)
{
@@ -95,12 +196,44 @@ _mm_div_sd(__m128d __a, __m128d __b)
return __a;
}
+/// \brief Performs an element-by-element division of two 128-bit vectors of
+/// [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VDIVPD / DIVPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the dividend.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing the divisor.
+/// \returns A 128-bit vector of [2 x double] containing the quotients of both
+/// operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_div_pd(__m128d __a, __m128d __b)
{
return (__m128d)((__v2df)__a / (__v2df)__b);
}
+/// \brief Calculates the square root of the lower double-precision value of
+/// the second operand and returns it in the lower 64 bits of the result.
+/// The upper 64 bits of the result are copied from the upper double-
+/// precision value of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VSQRTSD / SQRTSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the operands. The
+/// upper 64 bits of this operand are copied to the upper 64 bits of the
+/// result.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the operands. The
+/// square root is calculated using the lower 64 bits of this operand.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// square root of the lower 64 bits of operand \a __b, and whose upper 64
+/// bits are copied from the upper 64 bits of operand \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_sqrt_sd(__m128d __a, __m128d __b)
{
@@ -108,150 +241,518 @@ _mm_sqrt_sd(__m128d __a, __m128d __b)
return (__m128d) { __c[0], __a[1] };
}
+/// \brief Calculates the square root of each of the two values stored in a
+/// 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VSQRTPD / SQRTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector of [2 x double] containing the square roots of the
+/// values in the operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_sqrt_pd(__m128d __a)
{
return __builtin_ia32_sqrtpd((__v2df)__a);
}
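
Note the asymmetry of the scalar square root: the value whose root is taken
comes from the second operand, while the pass-through high lane comes from
the first. A sketch (not from the patch):

#include <emmintrin.h>

int main(void) {
  __m128d a = _mm_set_pd(9.0, 1.0);  /* high lane 9.0 passes through */
  __m128d b = _mm_set_sd(16.0);
  __m128d r = _mm_sqrt_sd(a, b);     /* lanes: low = 4.0, high = 9.0 */
  (void)r;
  return 0;
}
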
+/// \brief Compares lower 64-bit double-precision values of both operands, and
+/// returns the lesser of the pair of values in the lower 64 bits of the
+/// result. The upper 64 bits of the result are copied from the upper double-
+/// precision value of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMINSD / MINSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the operands. The
+/// lower 64 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the operands. The
+/// lower 64 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// minimum value between both operands. The upper 64 bits are copied from
+/// the upper 64 bits of the first source operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_min_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Performs element-by-element comparison of the two 128-bit vectors of
+/// [2 x double] and returns the vector containing the lesser of each pair of
+/// values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMINPD / MINPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the operands.
+/// \returns A 128-bit vector of [2 x double] containing the minimum values
+/// between both operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_min_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares lower 64-bit double-precision values of both operands, and
+/// returns the greater of the pair of values in the lower 64 bits of the
+/// result. The upper 64 bits of the result are copied from the upper double-
+/// precision value of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMAXSD / MAXSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the operands. The
+/// lower 64 bits of this operand are used in the comparison.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the operands. The
+/// lower 64 bits of this operand are used in the comparison.
+/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
+/// maximum value between both operands. The upper 64 bits are copied from
+/// the upper 64 bits of the first source operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_max_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Performs element-by-element comparison of the two 128-bit vectors of
+/// [2 x double] and returns the vector containing the greater of each pair
+/// of values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMAXPD / MAXPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the operands.
+/// \returns A 128-bit vector of [2 x double] containing the maximum values
+/// between both operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_max_pd(__m128d __a, __m128d __b)
{
return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
}
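
A common idiom built from the packed min/max above is a lane-wise clamp
(sketch, not from the patch; the helper name is invented):

#include <emmintrin.h>

/* Clamp both lanes of x into [lo, hi]. */
static inline __m128d clamp_pd(__m128d x, __m128d lo, __m128d hi) {
  return _mm_min_pd(_mm_max_pd(x, lo), hi);
}
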
+/// \brief Performs a bitwise AND of two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
+/// values between both operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_and_pd(__m128d __a, __m128d __b)
{
- return (__m128d)((__v4su)__a & (__v4su)__b);
+ return (__m128d)((__v2du)__a & (__v2du)__b);
}
+/// \brief Performs a bitwise AND of two 128-bit vectors of [2 x double], using
+/// the one's complement of the values contained in the first source operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the left source operand. The
+/// one's complement of this value is used in the bitwise AND.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing the right source operand.
+/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
+/// values in the second operand and the one's complement of the first
+/// operand.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_andnot_pd(__m128d __a, __m128d __b)
{
- return (__m128d)(~(__v4su)__a & (__v4su)__b);
+ return (__m128d)(~(__v2du)__a & (__v2du)__b);
}
+/// \brief Performs a bitwise OR of two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
+/// values between both operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_or_pd(__m128d __a, __m128d __b)
{
- return (__m128d)((__v4su)__a | (__v4su)__b);
+ return (__m128d)((__v2du)__a | (__v2du)__b);
}
+/// \brief Performs a bitwise XOR of two 128-bit vectors of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \param __b
+/// A 128-bit vector of [2 x double] containing one of the source operands.
+/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
+/// values between both operands.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_xor_pd(__m128d __a, __m128d __b)
{
- return (__m128d)((__v4su)__a ^ (__v4su)__b);
+ return (__m128d)((__v2du)__a ^ (__v2du)__b);
}
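
Because these are plain bitwise operations on the double lanes, they enable
sign-bit manipulation; -0.0 has only the sign bit set in each lane. A sketch
(not from the patch; helper names invented):

#include <emmintrin.h>

static inline __m128d abs_pd(__m128d x) {   /* clear sign bits: |x| */
  return _mm_andnot_pd(_mm_set1_pd(-0.0), x);
}
static inline __m128d neg_pd(__m128d x) {   /* flip sign bits: -x */
  return _mm_xor_pd(_mm_set1_pd(-0.0), x);
}
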
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] for equality. Each comparison yields 0h
+/// for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPEQPD / CMPEQPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpeq_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are less than those in the second operand. Each comparison
+/// yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmplt_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are less than or equal to those in the second operand. Each
+/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmple_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are greater than those in the second operand. Each comparison
+/// yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLTPD / CMPLTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpgt_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are greater than or equal to those in the second operand. Each
+/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLEPD / CMPLEPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpge_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
}
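
The all-zeros/all-ones masks produced by the packed comparisons combine with
the bitwise operations above for branchless lane selection (sketch, not from
the patch; the helper name is invented):

#include <emmintrin.h>

/* Per lane: pick a where a < b, else b. */
static inline __m128d select_lt(__m128d a, __m128d b) {
  __m128d m = _mm_cmplt_pd(a, b);  /* all-ones where a < b */
  return _mm_or_pd(_mm_and_pd(m, a), _mm_andnot_pd(m, b));
}
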
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are ordered with respect to those in the second operand. A pair
+/// of double-precision values are "ordered" with respect to each other if
+/// neither value is a NaN. Each comparison yields 0h for false,
+/// FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPORDPD / CMPORDPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpord_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are unordered with respect to those in the second operand. A pair
+/// of double-precision values are "unordered" with respect to each other if
+/// one or both values are NaN. Each comparison yields 0h for false,
+/// FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPUNORDPD / CMPUNORDPD </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpunord_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
}
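
A sketch of how NaNs split between the ordered and unordered predicates,
using _mm_movemask_pd (defined further down in this header) to extract each
lane's mask bit (not from the patch; assumes C99 <math.h> NAN):

#include <emmintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  __m128d a = _mm_set_pd(NAN, 1.0);  /* high lane is NaN */
  __m128d b = _mm_set1_pd(2.0);
  printf("ordered lanes:   %d\n", _mm_movemask_pd(_mm_cmpord_pd(a, b)));
  printf("unordered lanes: %d\n", _mm_movemask_pd(_mm_cmpunord_pd(a, b)));
  /* prints 1 (low lane only) and 2 (high lane only) */
  return 0;
}
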
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are unequal to those in the second operand. Each comparison
+/// yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNEQPD / CMPNEQPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpneq_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are not less than those in the second operand. Each comparison
+/// yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpnlt_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are not less than or equal to those in the second operand. Each
+/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpnle_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are not greater than those in the second operand. Each
+/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLTPD / CMPNLTPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpngt_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
}
+/// \brief Compares each of the corresponding double-precision values of the
+/// 128-bit vectors of [2 x double] to determine if the values in the first
+/// operand are not greater than or equal to those in the second operand.
+/// Each comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLEPD / CMPNLEPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \param __b
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector containing the comparison results.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpnge_pd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] for equality. The
+/// comparison yields 0h for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPEQSD / CMPEQSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpeq_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is less than the corresponding value in
+/// the second parameter. The comparison yields 0h for false,
+/// FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmplt_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is less than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0h for
+/// false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmple_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is greater than the corresponding value
+/// in the second parameter. The comparison yields 0h for false,
+/// FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLTSD / CMPLTSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpgt_sd(__m128d __a, __m128d __b)
{
@@ -259,6 +760,24 @@ _mm_cmpgt_sd(__m128d __a, __m128d __b)
return (__m128d) { __c[0], __a[1] };
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is greater than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0h for
+/// false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPLESD / CMPLESD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpge_sd(__m128d __a, __m128d __b)
{
@@ -266,36 +785,147 @@ _mm_cmpge_sd(__m128d __a, __m128d __b)
return (__m128d) { __c[0], __a[1] };
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is "ordered" with respect to the
+/// corresponding value in the second parameter. The comparison yields 0h for
+/// false, FFFFFFFFFFFFFFFFh for true. A pair of double-precision values is
+/// "ordered" with respect to each other if neither value is a NaN.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPORDSD / CMPORDSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpord_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is "unordered" with respect to the
+/// corresponding value in the second parameter. The comparison yields 0h
+/// for false, FFFFFFFFFFFFFFFFh for true. A pair of double-precision values
+/// are "unordered" with respect to each other if one or both values are NaN.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPUNORDSD / CMPUNORDSD </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpunord_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is unequal to the corresponding value in
+/// the second parameter. The comparison yields 0h for false,
+/// FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNEQSD / CMPNEQSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpneq_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is not less than the corresponding
+/// value in the second parameter. The comparison yields 0h for false,
+/// FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpnlt_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is not less than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0h
+/// for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpnle_sd(__m128d __a, __m128d __b)
{
return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is not greater than the corresponding
+/// value in the second parameter. The comparison yields 0h for false,
+/// FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLTSD / CMPNLTSD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpngt_sd(__m128d __a, __m128d __b)
{
@@ -303,6 +933,24 @@ _mm_cmpngt_sd(__m128d __a, __m128d __b)
return (__m128d) { __c[0], __a[1] };
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is not greater than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0h
+/// for false, FFFFFFFFFFFFFFFFh for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCMPNLESD / CMPNLESD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns A 128-bit vector. The lower 64 bits contain the comparison
+/// result. The upper 64 bits are copied from the upper 64 bits of \a __a.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cmpnge_sd(__m128d __a, __m128d __b)
{
@@ -310,84 +958,317 @@ _mm_cmpnge_sd(__m128d __a, __m128d __b)
return (__m128d) { __c[0], __a[1] };
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] for equality. The
+/// comparison yields 0 for false, 1 for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comieq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is less than the corresponding value in
+/// the second parameter. The comparison yields 0 for false, 1 for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comilt_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is less than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0 for
+/// false, 1 for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comile_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is greater than the corresponding value
+/// in the second parameter. The comparison yields 0 for false, 1 for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comigt_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is greater than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0 for
+/// false, 1 for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comige_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is unequal to the corresponding value in
+/// the second parameter. The comparison yields 0 for false, 1 for true.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCOMISD / COMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_comineq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] for equality. The
+/// comparison yields 0 for false, 1 for true. If either of the two lower
+/// double-precision values is NaN, 1 is returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result. If either of the two
+/// lower double-precision values is NaN, 1 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomieq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is less than the corresponding value in
+/// the second parameter. The comparison yields 0 for false, 1 for true. If
+/// either of the two lower double-precision values is NaN, 1 is returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result. If either of the two
+/// lower double-precision values is NaN, 1 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomilt_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is less than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0 for
+/// false, 1 for true. If either of the two lower double-precision values is
+/// NaN, 1 is returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result. If either of the two
+/// lower double-precision values is NaN, 1 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomile_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is greater than the corresponding value
+/// in the second parameter. The comparison yields 0 for false, 1 for true.
+/// If either of the two lower double-precision values is NaN, 0 is returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomigt_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is greater than or equal to the
+/// corresponding value in the second parameter. The comparison yields 0 for
+/// false, 1 for true. If either of the two lower double-precision values
+/// is NaN, 0 is returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomige_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
}
+/// \brief Compares the lower double-precision floating-point values in each of
+/// the two 128-bit floating-point vectors of [2 x double] to determine if
+/// the value in the first parameter is unequal to the corresponding value in
+/// the second parameter. The comparison yields 0 for false, 1 for true. If
+/// either of the two lower double-precision values is NaN, 0 is returned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUCOMISD / UCOMISD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __b.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision value is
+/// compared to the lower double-precision value of \a __a.
+/// \returns An integer containing the comparison result. If either of the two
+/// lower double-precision values is NaN, 0 is returned.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_ucomineq_sd(__m128d __a, __m128d __b)
{
return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
}
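
A sketch contrasting the int-returning scalar comparisons and their
documented NaN results (not from the patch; assumes C99 <math.h> NAN):

#include <emmintrin.h>
#include <math.h>
#include <stdio.h>

int main(void) {
  __m128d x = _mm_set_sd(NAN), y = _mm_set_sd(1.0);
  printf("%d\n", _mm_comigt_sd(y, _mm_set_sd(0.5))); /* 1: 1.0 > 0.5 */
  printf("%d\n", _mm_ucomigt_sd(x, y)); /* 0 with a NaN operand */
  printf("%d\n", _mm_ucomieq_sd(x, y)); /* 1 with a NaN operand */
  return 0;
}
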
+/// \brief Converts the two double-precision floating-point elements of a
+/// 128-bit vector of [2 x double] into two single-precision floating-point
+/// values, returned in the lower 64 bits of a 128-bit vector of [4 x float].
+/// The upper 64 bits of the result vector are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTPD2PS / CVTPD2PS </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
+/// converted values. The upper 64 bits are set to zero.
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_cvtpd_ps(__m128d __a)
{
return __builtin_ia32_cvtpd2ps((__v2df)__a);
}
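
The narrowing conversion fills only the low half of the [4 x float] result;
a sketch (not from the patch):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  float r[4];
  _mm_storeu_ps(r, _mm_cvtpd_ps(_mm_set_pd(2.0, 1.0)));
  printf("%g %g %g %g\n", r[0], r[1], r[2], r[3]); /* 1 2 0 0 */
  return 0;
}
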
+/// \brief Converts the lower two single-precision floating-point elements of a
+/// 128-bit vector of [4 x float] into two double-precision floating-point
+/// values, returned in a 128-bit vector of [2 x double]. The upper two
+/// elements of the input vector are unused.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTPS2PD / CVTPS2PD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The lower two single-precision
+/// floating-point elements are converted to double-precision values. The
+/// upper two elements are unused.
+/// \returns A 128-bit vector of [2 x double] containing the converted values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cvtps_pd(__m128 __a)
{
@@ -395,6 +1276,19 @@ _mm_cvtps_pd(__m128 __a)
__builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
}
+/// \brief Converts the lower two integer elements of a 128-bit vector of
+/// [4 x i32] into two double-precision floating-point values, returned in a
+/// 128-bit vector of [2 x double]. The upper two elements of the input
+/// vector are unused.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTDQ2PD / CVTDQ2PD </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector of [4 x i32]. The lower two integer elements are
+/// converted to double-precision values. The upper two elements are unused.
+/// \returns A 128-bit vector of [2 x double] containing the converted values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cvtepi32_pd(__m128i __a)
{
@@ -402,24 +1296,84 @@ _mm_cvtepi32_pd(__m128i __a)
__builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
}
+/// \brief Converts the two double-precision floating-point elements of a
+/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
+/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. The upper
+/// 64 bits of the result vector are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTPD2DQ / CVTPD2DQ </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
+/// converted values. The upper 64 bits are set to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvtpd_epi32(__m128d __a)
{
return __builtin_ia32_cvtpd2dq((__v2df)__a);
}
+/// \brief Converts the low-order element of a 128-bit vector of [2 x double]
+/// into a 32-bit signed integer value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
+/// conversion.
+/// \returns A 32-bit signed integer containing the converted value.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_cvtsd_si32(__m128d __a)
{
return __builtin_ia32_cvtsd2si((__v2df)__a);
}
+/// \brief Converts the lower double-precision floating-point element of a
+/// 128-bit vector of [2 x double], in the second parameter, into a
+/// single-precision floating-point value, returned in the lower 32 bits of a
+/// 128-bit vector of [4 x float]. The upper 96 bits of the result vector are
+/// copied from the upper 96 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTSD2SS / CVTSD2SS </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x float]. The upper 96 bits of this parameter are
+/// copied to the upper 96 bits of the result.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower double-precision
+/// floating-point element is used in the conversion.
+/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
+/// converted value from the second parameter. The upper 96 bits are copied
+/// from the upper 96 bits of the first parameter.
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_cvtsd_ss(__m128 __a, __m128d __b)
{
return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
}
+/// \brief Converts a 32-bit signed integer value, in the second parameter, into
+/// a double-precision floating-point value, returned in the lower 64 bits of
+/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
+/// are copied from the upper 64 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
+/// copied to the upper 64 bits of the result.
+/// \param __b
+/// A 32-bit signed integer containing the value to be converted.
+/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
+/// converted value from the second parameter. The upper 64 bits are copied
+/// from the upper 64 bits of the first parameter.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cvtsi32_sd(__m128d __a, int __b)
{
@@ -427,6 +1381,25 @@ _mm_cvtsi32_sd(__m128d __a, int __b)
return __a;
}
+/// \brief Converts the lower single-precision floating-point element of a
+/// 128-bit vector of [4 x float], in the second parameter, into a
+/// double-precision floating-point value, returned in the lower 64 bits of
+/// a 128-bit vector of [2 x double]. The upper 64 bits of the result vector
+/// are copied from the upper 64 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTSS2SD / CVTSS2SD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The upper 64 bits of this parameter are
+/// copied to the upper 64 bits of the result.
+/// \param __b
+/// A 128-bit vector of [4 x float]. The lower single-precision
+/// floating-point element is used in the conversion.
+/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
+/// converted value from the second parameter. The upper 64 bits are copied
+/// from the upper 64 bits of the first parameter.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cvtss_sd(__m128d __a, __m128 __b)
{
@@ -434,48 +1407,145 @@ _mm_cvtss_sd(__m128d __a, __m128 __b)
return __a;
}
+/// \brief Converts the two double-precision floating-point elements of a
+/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
+/// returned in the lower 64 bits of a 128-bit vector of [4 x i32]. If the
+/// result of either conversion is inexact, the result is truncated (rounded
+/// towards zero) regardless of the current MXCSR setting. The upper 64 bits
+/// of the result vector are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTTPD2DQ / CVTTPD2DQ </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
+/// converted values. The upper 64 bits are set to zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_cvttpd_epi32(__m128d __a)
{
return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
}
+/// \brief Converts the low-order element of a [2 x double] vector into a 32-bit
+/// signed integer value, truncating the result when it is inexact.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
+/// conversion.
+/// \returns A 32-bit signed integer containing the converted value.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_cvttsd_si32(__m128d __a)
{
return __builtin_ia32_cvttsd2si((__v2df)__a);
}
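
A sketch of the rounding difference between the two scalar conversions (not
from the patch; assumes the default round-to-nearest MXCSR state):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128d v = _mm_set_sd(2.7);
  /* cvtsd honors the current MXCSR rounding mode; cvttsd truncates. */
  printf("%d %d\n", _mm_cvtsd_si32(v), _mm_cvttsd_si32(v)); /* 3 2 */
  return 0;
}
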
+/// \brief Converts the two double-precision floating-point elements of a
+/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
+/// returned in a 64-bit vector of [2 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CVTPD2PI </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \returns A 64-bit vector of [2 x i32] containing the converted values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_cvtpd_pi32(__m128d __a)
{
return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
}
+/// \brief Converts the two double-precision floating-point elements of a
+/// 128-bit vector of [2 x double] into two signed 32-bit integer values,
+/// returned in a 64-bit vector of [2 x i32]. If the result of either
+/// conversion is inexact, the result is truncated (rounded towards zero)
+/// regardless of the current MXCSR setting.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CVTTPD2PI </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double].
+/// \returns A 64-bit vector of [2 x i32] containing the converted values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_cvttpd_pi32(__m128d __a)
{
return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
}
+/// \brief Converts the two signed 32-bit integer elements of a 64-bit vector of
+/// [2 x i32] into two double-precision floating-point values, returned in a
+/// 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CVTPI2PD </c> instruction.
+///
+/// \param __a
+/// A 64-bit vector of [2 x i32].
+/// \returns A 128-bit vector of [2 x double] containing the converted values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_cvtpi32_pd(__m64 __a)
{
return __builtin_ia32_cvtpi2pd((__v2si)__a);
}
+/// \brief Returns the low-order element of a 128-bit vector of [2 x double] as
+/// a double-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The lower 64 bits are returned.
+/// \returns A double-precision floating-point value copied from the lower 64
+/// bits of \a __a.
static __inline__ double __DEFAULT_FN_ATTRS
_mm_cvtsd_f64(__m128d __a)
{
return __a[0];
}
+/// \brief Loads a 128-bit floating-point vector of [2 x double] from an aligned
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 16-byte aligned.
+/// \returns A 128-bit vector of [2 x double] containing the loaded values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_load_pd(double const *__dp)
{
return *(__m128d*)__dp;
}
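A minimal caller sketch; meeting the 16-byte alignment requirement with C11 _Alignas is one option, assuming a C11 toolchain:

#include <emmintrin.h>

static __m128d load_example(void) {
  _Alignas(16) double buf[2] = { 1.0, 2.0 };  /* 16-byte aligned, as required */
  return _mm_load_pd(buf);                    /* element 0 = 1.0, element 1 = 2.0 */
}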
+/// \brief Loads a double-precision floating-point value from a specified memory
+/// location and duplicates it to both vector elements of a 128-bit vector of
+/// [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDDUP / MOVDDUP </c> instruction.
+///
+/// \param __dp
+/// A pointer to a memory location containing a double-precision value.
+/// \returns A 128-bit vector of [2 x double] containing the loaded and
+/// duplicated values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_load1_pd(double const *__dp)
{
@@ -488,6 +1558,20 @@ _mm_load1_pd(double const *__dp)
#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+/// \brief Loads two double-precision values, in reverse order, from an aligned
+/// memory location into a 128-bit vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction
+/// plus any needed shuffling instructions. In AVX mode, the shuffling may be
+/// combined with the \c VMOVAPD, resulting in only a \c VPERMILPD
+/// instruction.
+///
+/// \param __dp
+/// A 16-byte aligned pointer to an array of double-precision values to be
+/// loaded in reverse order.
+/// \returns A 128-bit vector of [2 x double] containing the reversed loaded
+/// values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_loadr_pd(double const *__dp)
{
@@ -495,6 +1579,17 @@ _mm_loadr_pd(double const *__dp)
return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
}
+/// \brief Loads a 128-bit floating-point vector of [2 x double] from an
+/// unaligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \returns A 128-bit vector of [2 x double] containing the loaded values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_loadu_pd(double const *__dp)
{
@@ -524,6 +1619,23 @@ _mm_load_sd(double const *__dp)
return (__m128d){ __u, 0 };
}
+/// \brief Loads a double-precision value into the high-order bits of a 128-bit
+/// vector of [2 x double]. The low-order bits are copied from the low-order
+/// bits of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. \n
+/// Bits [63:0] are written to bits [63:0] of the result.
+/// \param __dp
+/// A pointer to a 64-bit memory location containing a double-precision
+/// floating-point value that is loaded. The loaded value is written to bits
+/// [127:64] of the result. The address of the memory location does not have
+/// to be aligned.
+/// \returns A 128-bit vector of [2 x double] containing the moved values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_loadh_pd(__m128d __a, double const *__dp)
{
@@ -534,6 +1646,23 @@ _mm_loadh_pd(__m128d __a, double const *__dp)
return (__m128d){ __a[0], __u };
}
+/// \brief Loads a double-precision value into the low-order bits of a 128-bit
+/// vector of [2 x double]. The high-order bits are copied from the
+/// high-order bits of the first operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. \n
+/// Bits [127:64] are written to bits [127:64] of the result.
+/// \param __dp
+/// A pointer to a 64-bit memory location containing a double-precision
+/// floating-point value that is loaded. The loaded value is written to bits
+/// [63:0] of the result. The address of the memory location does not have to
+/// be aligned.
+/// \returns A 128-bit vector of [2 x double] containing the moved values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_loadl_pd(__m128d __a, double const *__dp)
{
@@ -544,48 +1673,149 @@ _mm_loadl_pd(__m128d __a, double const *__dp)
return (__m128d){ __u, __a[1] };
}
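Taken together, _mm_loadl_pd and _mm_loadh_pd can assemble one vector from two unrelated memory locations. A sketch, with a hypothetical helper name:

#include <emmintrin.h>

static __m128d gather_example(const double *lo, const double *hi) {
  __m128d v = _mm_setzero_pd();
  v = _mm_loadl_pd(v, lo);  /* bits [63:0]   <- *lo */
  v = _mm_loadh_pd(v, hi);  /* bits [127:64] <- *hi */
  return v;
}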
+/// \brief Constructs a 128-bit floating-point vector of [2 x double] with
+/// unspecified content. This could be used as an argument to another
+/// intrinsic function where the argument is required but the value is not
+/// actually used.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \returns A 128-bit floating-point vector of [2 x double] with unspecified
+/// content.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_undefined_pd(void)
{
return (__m128d)__builtin_ia32_undef128();
}
+/// \brief Constructs a 128-bit floating-point vector of [2 x double]. The lower
+/// 64 bits of the vector are initialized with the specified double-precision
+/// floating-point value. The upper 64 bits are set to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+///
+/// \param __w
+/// A double-precision floating-point value used to initialize the lower 64
+/// bits of the result.
+/// \returns An initialized 128-bit floating-point vector of [2 x double]. The
+/// lower 64 bits contain the value of the parameter. The upper 64 bits are
+/// set to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_set_sd(double __w)
{
return (__m128d){ __w, 0 };
}
+/// \brief Constructs a 128-bit floating-point vector of [2 x double], with each
+/// of the two double-precision floating-point vector elements set to the
+/// specified double-precision floating-point value.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVDDUP / MOVLHPS </c> instruction.
+///
+/// \param __w
+/// A double-precision floating-point value used to initialize each vector
+/// element of the result.
+/// \returns An initialized 128-bit floating-point vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_set1_pd(double __w)
{
return (__m128d){ __w, __w };
}
+/// \brief Constructs a 128-bit floating-point vector of [2 x double]
+/// initialized with the specified double-precision floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
+///
+/// \param __w
+/// A double-precision floating-point value used to initialize the upper 64
+/// bits of the result.
+/// \param __x
+/// A double-precision floating-point value used to initialize the lower 64
+/// bits of the result.
+/// \returns An initialized 128-bit floating-point vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_set_pd(double __w, double __x)
{
return (__m128d){ __x, __w };
}
+/// \brief Constructs a 128-bit floating-point vector of [2 x double],
+/// initialized in reverse order with the specified double-precision
+/// floating-point values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
+///
+/// \param __w
+/// A double-precision floating-point value used to initialize the lower 64
+/// bits of the result.
+/// \param __x
+/// A double-precision floating-point value used to initialize the upper 64
+/// bits of the result.
+/// \returns An initialized 128-bit floating-point vector of [2 x double].
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_setr_pd(double __w, double __x)
{
return (__m128d){ __w, __x };
}
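The only difference between _mm_set_pd and _mm_setr_pd is argument order; a short illustrative sketch of the equivalence:

#include <emmintrin.h>

static void set_order_example(void) {
  __m128d a = _mm_set_pd(2.0, 1.0);   /* element 0 = 1.0, element 1 = 2.0 */
  __m128d b = _mm_setr_pd(1.0, 2.0);  /* same layout: arguments in memory order */
  (void)a; (void)b;                   /* a and b hold identical values */
}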
+/// \brief Constructs a 128-bit floating-point vector of [2 x double]
+/// initialized to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
+///
+/// \returns An initialized 128-bit floating-point vector of [2 x double] with
+/// all elements set to zero.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_setzero_pd(void)
{
return (__m128d){ 0, 0 };
}
+/// \brief Constructs a 128-bit floating-point vector of [2 x double]. The lower
+/// 64 bits are set to the lower 64 bits of the second parameter. The upper
+/// 64 bits are set to the upper 64 bits of the first parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VBLENDPD / BLENDPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. The upper 64 bits are written to the
+/// upper 64 bits of the result.
+/// \param __b
+/// A 128-bit vector of [2 x double]. The lower 64 bits are written to the
+/// lower 64 bits of the result.
+/// \returns A 128-bit vector of [2 x double] containing the moved values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_move_sd(__m128d __a, __m128d __b)
{
return (__m128d){ __b[0], __a[1] };
}
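A sketch of the blend performed by _mm_move_sd (helper name hypothetical):

#include <emmintrin.h>

static __m128d move_sd_example(void) {
  __m128d a = _mm_setr_pd(11.0, 10.0);  /* low = 11.0, high = 10.0 */
  __m128d b = _mm_setr_pd(21.0, 20.0);  /* low = 21.0, high = 20.0 */
  return _mm_move_sd(a, b);             /* { 21.0, 10.0 }: low from b, high from a */
}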
+/// \brief Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVSD / MOVSD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 64-bit memory location.
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store_sd(double *__dp, __m128d __a)
{
@@ -608,12 +1838,36 @@ _mm_store1_pd(double *__dp, __m128d __a)
_mm_store_pd(__dp, __a);
}
+/// \brief Stores a 128-bit vector of [2 x double] into an aligned memory
+/// location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPD / MOVAPD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 16-byte aligned.
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the values to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store_pd1(double *__dp, __m128d __a)
{
return _mm_store1_pd(__dp, __a);
}
+/// \brief Stores a 128-bit vector of [2 x double] into an unaligned memory
+/// location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVUPD / MOVUPD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location does not have to be aligned.
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the values to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_pd(double *__dp, __m128d __a)
{
@@ -623,6 +1877,20 @@ _mm_storeu_pd(double *__dp, __m128d __a)
((struct __storeu_pd*)__dp)->__v = __a;
}
+/// \brief Stores two double-precision values, in reverse order, from a 128-bit
+/// vector of [2 x double] to a 16-byte aligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to a shuffling instruction followed by a
+/// <c> VMOVAPD / MOVAPD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 16-byte aligned memory location that can store two
+/// double-precision values.
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the values to be reversed and
+/// stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storer_pd(double *__dp, __m128d __a)
{
@@ -630,6 +1898,17 @@ _mm_storer_pd(double *__dp, __m128d __a)
*(__m128d *)__dp = __a;
}
+/// \brief Stores the upper 64 bits of a 128-bit vector of [2 x double] to a
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 64-bit memory location.
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeh_pd(double *__dp, __m128d __a)
{
@@ -639,6 +1918,17 @@ _mm_storeh_pd(double *__dp, __m128d __a)
((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
}
+/// \brief Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
+/// memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
+///
+/// \param __dp
+/// A pointer to a 64-bit memory location.
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storel_pd(double *__dp, __m128d __a)
{
@@ -648,127 +1938,391 @@ _mm_storel_pd(double *__dp, __m128d __a)
((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
}
+/// \brief Adds the corresponding elements of two 128-bit vectors of [16 x i8],
+/// saving the lower 8 bits of each sum in the corresponding element of a
+/// 128-bit result vector of [16 x i8]. The integer elements of both
+/// parameters can be either signed or unsigned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDB / PADDB </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [16 x i8].
+/// \param __b
+/// A 128-bit vector of [16 x i8].
+/// \returns A 128-bit vector of [16 x i8] containing the sums of both
+/// parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_add_epi8(__m128i __a, __m128i __b)
{
return (__m128i)((__v16qu)__a + (__v16qu)__b);
}
+/// \brief Adds the corresponding elements of two 128-bit vectors of [8 x i16],
+/// saving the lower 16 bits of each sum in the corresponding element of a
+/// 128-bit result vector of [8 x i16]. The integer elements of both
+/// parameters can be either signed or unsigned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDW / PADDW </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16].
+/// \param __b
+/// A 128-bit vector of [8 x i16].
+/// \returns A 128-bit vector of [8 x i16] containing the sums of both
+/// parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_add_epi16(__m128i __a, __m128i __b)
{
return (__m128i)((__v8hu)__a + (__v8hu)__b);
}
+/// \brief Adds the corresponding elements of two 128-bit vectors of [4 x i32],
+/// saving the lower 32 bits of each sum in the corresponding element of a
+/// 128-bit result vector of [4 x i32]. The integer elements of both
+/// parameters can be either signed or unsigned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDD / PADDD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32].
+/// \param __b
+/// A 128-bit vector of [4 x i32].
+/// \returns A 128-bit vector of [4 x i32] containing the sums of both
+/// parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_add_epi32(__m128i __a, __m128i __b)
{
return (__m128i)((__v4su)__a + (__v4su)__b);
}
+/// \brief Adds two signed or unsigned 64-bit integer values, returning the
+/// lower 64 bits of the sum.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> PADDQ </c> instruction.
+///
+/// \param __a
+/// A 64-bit integer.
+/// \param __b
+/// A 64-bit integer.
+/// \returns A 64-bit integer containing the sum of both parameters.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_add_si64(__m64 __a, __m64 __b)
{
return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
}
+/// \brief Adds the corresponding elements of two 128-bit vectors of [2 x i64],
+/// saving the lower 64 bits of each sum in the corresponding element of a
+/// 128-bit result vector of [2 x i64]. The integer elements of both
+/// parameters can be either signed or unsigned.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDQ / PADDQ </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x i64].
+/// \param __b
+/// A 128-bit vector of [2 x i64].
+/// \returns A 128-bit vector of [2 x i64] containing the sums of both
+/// parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_add_epi64(__m128i __a, __m128i __b)
{
return (__m128i)((__v2du)__a + (__v2du)__b);
}
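These element-wise adds are modular, which is why the documentation notes that signed and unsigned inputs are handled identically. A sketch of the wraparound:

#include <emmintrin.h>

static __m128i wrap_example(void) {
  __m128i a = _mm_set1_epi8((char)0xFF);  /* 255 unsigned / -1 signed, per lane */
  __m128i b = _mm_set1_epi8(1);
  return _mm_add_epi8(a, b);              /* every byte wraps to 0x00 */
}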
+/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// signed [16 x i8] vectors, saving each sum in the corresponding element of
+/// a 128-bit result vector of [16 x i8]. Positive sums greater than 7Fh are
+/// saturated to 7Fh. Negative sums less than 80h are saturated to 80h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDSB / PADDSB </c> instruction.
+///
+/// \param __a
+/// A 128-bit signed [16 x i8] vector.
+/// \param __b
+/// A 128-bit signed [16 x i8] vector.
+/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
+/// both parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_adds_epi8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
}
+/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// signed [8 x i16] vectors, saving each sum in the corresponding element of
+/// a 128-bit result vector of [8 x i16]. Positive sums greater than 7FFFh
+/// are saturated to 7FFFh. Negative sums less than 8000h are saturated to
+/// 8000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDSW / PADDSW </c> instruction.
+///
+/// \param __a
+/// A 128-bit signed [8 x i16] vector.
+/// \param __b
+/// A 128-bit signed [8 x i16] vector.
+/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
+/// both parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_adds_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
}
+/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// unsigned [16 x i8] vectors, saving each sum in the corresponding element
+/// of a 128-bit result vector of [16 x i8]. Positive sums greater than FFh
+/// are saturated to FFh. Negative sums are saturated to 00h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDUSB / PADDUSB </c> instruction.
+///
+/// \param __a
+/// A 128-bit unsigned [16 x i8] vector.
+/// \param __b
+/// A 128-bit unsigned [16 x i8] vector.
+/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
+/// of both parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_adds_epu8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
}
+/// \brief Adds, with saturation, the corresponding elements of two 128-bit
+/// unsigned [8 x i16] vectors, saving each sum in the corresponding element
+/// of a 128-bit result vector of [8 x i16]. Positive sums greater than FFFFh
+/// are saturated to FFFFh. Negative sums are saturated to 0000h.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPADDUSW / PADDUSW </c> instruction.
+///
+/// \param __a
+/// A 128-bit unsigned [8 x i16] vector.
+/// \param __b
+/// A 128-bit unsigned [8 x i16] vector.
+/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
+/// of both parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_adds_epu16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
}
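In contrast to the modular _mm_add_* family above, the saturating forms clamp instead of wrapping. An illustrative sketch:

#include <emmintrin.h>

static __m128i saturate_example(void) {
  __m128i a = _mm_set1_epi8(100);
  __m128i b = _mm_set1_epi8(100);
  __m128i s = _mm_adds_epi8(a, b);  /* signed: 100 + 100 clamps to 127 (7Fh) */
  __m128i u = _mm_adds_epu8(a, b);  /* unsigned: 200 fits, no clamping */
  return _mm_xor_si128(s, u);       /* consume both results */
}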
+/// \brief Computes the rounded averages of corresponding elements of two
+/// 128-bit unsigned [16 x i8] vectors, saving each result in the
+/// corresponding element of a 128-bit result vector of [16 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPAVGB / PAVGB </c> instruction.
+///
+/// \param __a
+/// A 128-bit unsigned [16 x i8] vector.
+/// \param __b
+/// A 128-bit unsigned [16 x i8] vector.
+/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
+/// averages of both parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_avg_epu8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
}
+/// \brief Computes the rounded averages of corresponding elements of two
+/// 128-bit unsigned [8 x i16] vectors, saving each result in the
+/// corresponding element of a 128-bit result vector of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPAVGW / PAVGW </c> instruction.
+///
+/// \param __a
+/// A 128-bit unsigned [8 x i16] vector.
+/// \param __b
+/// A 128-bit unsigned [8 x i16] vector.
+/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
+/// averages of both parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_avg_epu16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
}
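The rounded average is (a + b + 1) >> 1 in each lane, so exact halves round up. A minimal sketch:

#include <emmintrin.h>

static __m128i avg_example(void) {
  __m128i a = _mm_set1_epi8(4);
  __m128i b = _mm_set1_epi8(7);
  return _mm_avg_epu8(a, b);  /* (4 + 7 + 1) >> 1 = 6 in every lane */
}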
+/// \brief Multiplies the corresponding elements of two 128-bit signed [8 x i16]
+/// vectors, producing eight intermediate 32-bit signed integer products, and
+/// adds the consecutive pairs of 32-bit products to form a 128-bit signed
+/// [4 x i32] vector. For example, bits [15:0] of both parameters are
+/// multiplied producing a 32-bit product, bits [31:16] of both parameters
+/// are multiplied producing a 32-bit product, and the sum of those two
+/// products becomes bits [31:0] of the result.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMADDWD / PMADDWD </c> instruction.
+///
+/// \param __a
+/// A 128-bit signed [8 x i16] vector.
+/// \param __b
+/// A 128-bit signed [8 x i16] vector.
+/// \returns A 128-bit signed [4 x i32] vector containing the sums of products
+/// of both parameters.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_madd_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
}
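Because each 32-bit lane receives the sum of two adjacent products, _mm_madd_epi16 is a common building block for 16-bit dot products. A worked sketch:

#include <emmintrin.h>

static __m128i madd_example(void) {
  __m128i a = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
  __m128i b = _mm_setr_epi16(8, 7, 6, 5, 4, 3, 2, 1);
  /* [4 x i32] = { 1*8 + 2*7, 3*6 + 4*5, 5*4 + 6*3, 7*2 + 8*1 }
               = { 22, 38, 38, 22 } */
  return _mm_madd_epi16(a, b);
}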
+/// \brief Compares corresponding elements of two 128-bit signed [8 x i16]
+/// vectors, saving the greater value from each comparison in the
+/// corresponding element of a 128-bit result vector of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMAXSW / PMAXSW </c> instruction.
+///
+/// \param __a
+/// A 128-bit signed [8 x i16] vector.
+/// \param __b
+/// A 128-bit signed [8 x i16] vector.
+/// \returns A 128-bit signed [8 x i16] vector containing the greater value of
+/// each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)__a, (__v8hi)__b);
}
+/// \brief Compares corresponding elements of two 128-bit unsigned [16 x i8]
+/// vectors, saving the greater value from each comparison in the
+/// corresponding element of a 128-bit result vector of [16 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMAXUB / PMAXUB </c> instruction.
+///
+/// \param __a
+/// A 128-bit unsigned [16 x i8] vector.
+/// \param __b
+/// A 128-bit unsigned [16 x i8] vector.
+/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
+/// each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_max_epu8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pmaxub128((__v16qi)__a, (__v16qi)__b);
}
+/// \brief Compares corresponding elements of two 128-bit signed [8 x i16]
+/// vectors, saving the smaller value from each comparison in the
+/// corresponding element of a 128-bit result vector of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMINSW / PMINSW </c> instruction.
+///
+/// \param __a
+/// A 128-bit signed [8 x i16] vector.
+/// \param __b
+/// A 128-bit signed [8 x i16] vector.
+/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
+/// each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pminsw128((__v8hi)__a, (__v8hi)__b);
}
+/// \brief Compares corresponding elements of two 128-bit unsigned [16 x i8]
+/// vectors, saving the smaller value from each comparison in the
+/// corresponding element of a 128-bit result vector of [16 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMINUB / PMINUB </c> instruction.
+///
+/// \param __a
+/// A 128-bit unsigned [16 x i8] vector.
+/// \param __b
+/// A 128-bit unsigned [16 x i8] vector.
+/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
+/// each comparison.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_min_epu8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pminub128((__v16qi)__a, (__v16qi)__b);
}
+/// \brief Multiplies the corresponding elements of two signed [8 x i16]
+/// vectors, saving the upper 16 bits of each 32-bit product in the
+/// corresponding element of a 128-bit signed [8 x i16] result vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMULHW / PMULHW </c> instruction.
+///
+/// \param __a
+/// A 128-bit signed [8 x i16] vector.
+/// \param __b
+/// A 128-bit signed [8 x i16] vector.
+/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
+/// each of the eight 32-bit products.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mulhi_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
}
+/// \brief Multiplies the corresponding elements of two unsigned [8 x i16]
+/// vectors, saving the upper 16 bits of each 32-bit product in the
+/// corresponding element of a 128-bit unsigned [8 x i16] result vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMULHUW / PMULHUW </c> instruction.
+///
+/// \param __a
+/// A 128-bit unsigned [8 x i16] vector.
+/// \param __b
+/// A 128-bit unsigned [8 x i16] vector.
+/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
+/// of each of the eight 32-bit products.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mulhi_epu16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
}
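Paired with _mm_mullo_epi16 (documented just below) and the unpack intrinsics defined elsewhere in this header, the high-half multiply recovers the full 32-bit product of each lane. A sketch:

#include <emmintrin.h>

static __m128i mulhi_example(void) {
  __m128i a  = _mm_set1_epi16(300);
  __m128i b  = _mm_set1_epi16(400);    /* 300 * 400 = 120000 = 0x1D4C0 */
  __m128i hi = _mm_mulhi_epi16(a, b);  /* each lane = 0x0001 */
  __m128i lo = _mm_mullo_epi16(a, b);  /* each lane = 0xD4C0 */
  return _mm_unpacklo_epi16(lo, hi);   /* interleave into full 32-bit products */
}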
-/// \brief Multiplies the corresponding elements of two [8 x short] vectors and
-/// returns a vector containing the low-order 16 bits of each 32-bit product
-/// in the corresponding element.
+/// \brief Multiplies the corresponding elements of two signed [8 x i16]
+/// vectors, saving the lower 16 bits of each 32-bit product in the
+/// corresponding element of a 128-bit signed [8 x i16] result vector.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPMULLW / PMULLW instruction.
+/// This intrinsic corresponds to the <c> VPMULLW / PMULLW </c> instruction.
///
/// \param __a
-/// A 128-bit integer vector containing one of the source operands.
+/// A 128-bit signed [8 x i16] vector.
/// \param __b
-/// A 128-bit integer vector containing one of the source operands.
-/// \returns A 128-bit integer vector containing the products of both operands.
+/// A 128-bit signed [8 x i16] vector.
+/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
+/// each of the eight 32-bit products.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_mullo_epi16(__m128i __a, __m128i __b)
{
@@ -781,7 +2335,7 @@ _mm_mullo_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMULUDQ instruction.
+/// This intrinsic corresponds to the <c> PMULUDQ </c> instruction.
///
/// \param __a
/// A 64-bit integer containing one of the source operands.
@@ -800,7 +2354,7 @@ _mm_mul_su32(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPMULUDQ / PMULUDQ instruction.
+/// This intrinsic corresponds to the <c> VPMULUDQ / PMULUDQ </c> instruction.
///
/// \param __a
/// A [2 x i64] vector containing one of the source operands.
@@ -821,7 +2375,7 @@ _mm_mul_epu32(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSADBW / PSADBW instruction.
+/// This intrinsic corresponds to the <c> VPSADBW / PSADBW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing one of the source operands.
@@ -839,7 +2393,7 @@ _mm_sad_epu8(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBB / PSUBB instruction.
+/// This intrinsic corresponds to the <c> VPSUBB / PSUBB </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -857,7 +2411,7 @@ _mm_sub_epi8(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBW / PSUBW instruction.
+/// This intrinsic corresponds to the <c> VPSUBW / PSUBW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -875,7 +2429,7 @@ _mm_sub_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBD / PSUBD instruction.
+/// This intrinsic corresponds to the <c> VPSUBD / PSUBD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -894,7 +2448,7 @@ _mm_sub_epi32(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBQ instruction.
+/// This intrinsic corresponds to the <c> PSUBQ </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing the minuend.
@@ -912,7 +2466,7 @@ _mm_sub_si64(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBQ / PSUBQ instruction.
+/// This intrinsic corresponds to the <c> VPSUBQ / PSUBQ </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -933,7 +2487,7 @@ _mm_sub_epi64(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBSB / PSUBSB instruction.
+/// This intrinsic corresponds to the <c> VPSUBSB / PSUBSB </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -954,7 +2508,7 @@ _mm_subs_epi8(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBSW / PSUBSW instruction.
+/// This intrinsic corresponds to the <c> VPSUBSW / PSUBSW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -974,7 +2528,7 @@ _mm_subs_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBUSB / PSUBUSB instruction.
+/// This intrinsic corresponds to the <c> VPSUBUSB / PSUBUSB </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -994,7 +2548,7 @@ _mm_subs_epu8(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSUBUSW / PSUBUSW instruction.
+/// This intrinsic corresponds to the <c> VPSUBUSW / PSUBUSW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the minuends.
@@ -1012,7 +2566,7 @@ _mm_subs_epu16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPAND / PAND instruction.
+/// This intrinsic corresponds to the <c> VPAND / PAND </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing one of the source operands.
@@ -1031,7 +2585,7 @@ _mm_and_si128(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPANDN / PANDN instruction.
+/// This intrinsic corresponds to the <c> VPANDN / PANDN </c> instruction.
///
/// \param __a
/// A 128-bit vector containing the left source operand. The one's complement
@@ -1049,7 +2603,7 @@ _mm_andnot_si128(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPOR / POR instruction.
+/// This intrinsic corresponds to the <c> VPOR / POR </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing one of the source operands.
@@ -1067,7 +2621,7 @@ _mm_or_si128(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPXOR / PXOR instruction.
+/// This intrinsic corresponds to the <c> VPXOR / PXOR </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing one of the source operands.
@@ -1090,13 +2644,13 @@ _mm_xor_si128(__m128i __a, __m128i __b)
/// __m128i _mm_slli_si128(__m128i a, const int imm);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPSLLDQ / PSLLDQ instruction.
+/// This intrinsic corresponds to the <c> VPSLLDQ / PSLLDQ </c> instruction.
///
/// \param a
/// A 128-bit integer vector containing the source operand.
/// \param imm
-/// An immediate value specifying the number of bytes to left-shift
-/// operand a.
+/// An immediate value specifying the number of bytes to left-shift operand
+/// \a a.
/// \returns A 128-bit integer vector containing the left-shifted value.
#define _mm_slli_si128(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector( \
@@ -1127,13 +2681,13 @@ _mm_xor_si128(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSLLW / PSLLW instruction.
+/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to left-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_slli_epi16(__m128i __a, int __count)
@@ -1146,13 +2700,13 @@ _mm_slli_epi16(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSLLW / PSLLW instruction.
+/// This intrinsic corresponds to the <c> VPSLLW / PSLLW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to left-shift each value in operand __a.
+/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sll_epi16(__m128i __a, __m128i __count)
@@ -1165,13 +2719,13 @@ _mm_sll_epi16(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSLLD / PSLLD instruction.
+/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to left-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_slli_epi32(__m128i __a, int __count)
@@ -1184,13 +2738,13 @@ _mm_slli_epi32(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSLLD / PSLLD instruction.
+/// This intrinsic corresponds to the <c> VPSLLD / PSLLD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to left-shift each value in operand __a.
+/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sll_epi32(__m128i __a, __m128i __count)
@@ -1203,13 +2757,13 @@ _mm_sll_epi32(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSLLQ / PSLLQ instruction.
+/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to left-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_slli_epi64(__m128i __a, int __count)
@@ -1222,13 +2776,13 @@ _mm_slli_epi64(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSLLQ / PSLLQ instruction.
+/// This intrinsic corresponds to the <c> VPSLLQ / PSLLQ </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to left-shift each value in operand __a.
+/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sll_epi64(__m128i __a, __m128i __count)
@@ -1242,13 +2796,13 @@ _mm_sll_epi64(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRAW / PSRAW instruction.
+/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to right-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srai_epi16(__m128i __a, int __count)
@@ -1262,13 +2816,13 @@ _mm_srai_epi16(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRAW / PSRAW instruction.
+/// This intrinsic corresponds to the <c> VPSRAW / PSRAW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to right-shift each value in operand __a.
+/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sra_epi16(__m128i __a, __m128i __count)
@@ -1282,13 +2836,13 @@ _mm_sra_epi16(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRAD / PSRAD instruction.
+/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to right-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srai_epi32(__m128i __a, int __count)
@@ -1302,13 +2856,13 @@ _mm_srai_epi32(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRAD / PSRAD instruction.
+/// This intrinsic corresponds to the <c> VPSRAD / PSRAD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to right-shift each value in operand __a.
+/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_sra_epi32(__m128i __a, __m128i __count)
@@ -1325,13 +2879,13 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
/// __m128i _mm_srli_si128(__m128i a, const int imm);
/// \endcode
///
-/// This intrinsic corresponds to the \c VPSRLDQ / PSRLDQ instruction.
+/// This intrinsic corresponds to the <c> VPSRLDQ / PSRLDQ </c> instruction.
///
/// \param a
/// A 128-bit integer vector containing the source operand.
/// \param imm
/// An immediate value specifying the number of bytes to right-shift operand
-/// a.
+/// \a a.
/// \returns A 128-bit integer vector containing the right-shifted value.
#define _mm_srli_si128(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector( \
@@ -1362,13 +2916,13 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRLW / PSRLW instruction.
+/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to right-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srli_epi16(__m128i __a, int __count)
@@ -1381,13 +2935,13 @@ _mm_srli_epi16(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRLW / PSRLW instruction.
+/// This intrinsic corresponds to the <c> VPSRLW / PSRLW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to right-shift each value in operand __a.
+/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srl_epi16(__m128i __a, __m128i __count)
@@ -1400,13 +2954,13 @@ _mm_srl_epi16(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRLD / PSRLD instruction.
+/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to right-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srli_epi32(__m128i __a, int __count)
@@ -1419,13 +2973,13 @@ _mm_srli_epi32(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRLD / PSRLD instruction.
+/// This intrinsic corresponds to the <c> VPSRLD / PSRLD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to right-shift each value in operand __a.
+/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srl_epi32(__m128i __a, __m128i __count)
@@ -1438,13 +2992,13 @@ _mm_srl_epi32(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRLQ / PSRLQ instruction.
+/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// An integer value specifying the number of bits to right-shift each value
-/// in operand __a.
+/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srli_epi64(__m128i __a, int __count)
@@ -1457,13 +3011,13 @@ _mm_srli_epi64(__m128i __a, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSRLQ / PSRLQ instruction.
+/// This intrinsic corresponds to the <c> VPSRLQ / PSRLQ </c> instruction.
///
/// \param __a
/// A 128-bit integer vector containing the source operand.
/// \param __count
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
-/// to right-shift each value in operand __a.
+/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_srl_epi64(__m128i __a, __m128i __count)
@@ -1477,7 +3031,7 @@ _mm_srl_epi64(__m128i __a, __m128i __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPEQB / PCMPEQB instruction.
+/// This intrinsic corresponds to the <c> VPCMPEQB / PCMPEQB </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1496,7 +3050,7 @@ _mm_cmpeq_epi8(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPEQW / PCMPEQW instruction.
+/// This intrinsic corresponds to the <c> VPCMPEQW / PCMPEQW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1515,7 +3069,7 @@ _mm_cmpeq_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPEQD / PCMPEQD instruction.
+/// This intrinsic corresponds to the <c> VPCMPEQD / PCMPEQD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1535,7 +3089,7 @@ _mm_cmpeq_epi32(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPGTB / PCMPGTB instruction.
+/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1557,7 +3111,7 @@ _mm_cmpgt_epi8(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPGTW / PCMPGTW instruction.
+/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1577,7 +3131,7 @@ _mm_cmpgt_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPGTD / PCMPGTD instruction.
+/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1597,7 +3151,7 @@ _mm_cmpgt_epi32(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPGTB / PCMPGTB instruction.
+/// This intrinsic corresponds to the <c> VPCMPGTB / PCMPGTB </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1617,7 +3171,7 @@ _mm_cmplt_epi8(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPGTW / PCMPGTW instruction.
+/// This intrinsic corresponds to the <c> VPCMPGTW / PCMPGTW </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1637,7 +3191,7 @@ _mm_cmplt_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPCMPGTD / PCMPGTD instruction.
+/// This intrinsic corresponds to the <c> VPCMPGTD / PCMPGTD </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1658,7 +3212,7 @@ _mm_cmplt_epi32(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSI2SD / CVTSI2SD instruction.
+/// This intrinsic corresponds to the <c> VCVTSI2SD / CVTSI2SD </c> instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double]. The upper 64 bits of this operand are
@@ -1680,7 +3234,7 @@ _mm_cvtsi64_sd(__m128d __a, long long __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSD2SI / CVTSD2SI instruction.
+/// This intrinsic corresponds to the <c> VCVTSD2SI / CVTSD2SI </c> instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
@@ -1697,7 +3251,8 @@ _mm_cvtsd_si64(__m128d __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTTSD2SI / CVTTSD2SI instruction.
+/// This intrinsic corresponds to the <c> VCVTTSD2SI / CVTTSD2SI </c>
+/// instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
@@ -1714,7 +3269,7 @@ _mm_cvttsd_si64(__m128d __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTDQ2PS / CVTDQ2PS instruction.
+/// This intrinsic corresponds to the <c> VCVTDQ2PS / CVTDQ2PS </c> instruction.
///
/// \param __a
/// A 128-bit integer vector.
@@ -1729,7 +3284,7 @@ _mm_cvtepi32_ps(__m128i __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTPS2DQ / CVTPS2DQ instruction.
+/// This intrinsic corresponds to the <c> VCVTPS2DQ / CVTPS2DQ </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1746,7 +3301,8 @@ _mm_cvtps_epi32(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTTPS2DQ / CVTTPS2DQ instruction.
+/// This intrinsic corresponds to the <c> VCVTTPS2DQ / CVTTPS2DQ </c>
+/// instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1762,7 +3318,7 @@ _mm_cvttps_epi32(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
///
/// \param __a
/// A 32-bit signed integer operand.
@@ -1779,7 +3335,7 @@ _mm_cvtsi32_si128(int __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
///
/// \param __a
/// A 64-bit signed integer operand containing the value to be converted.
@@ -1796,7 +3352,7 @@ _mm_cvtsi64_si128(long long __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
///
/// \param __a
/// A vector of [4 x i32]. The least significant 32 bits are moved to the
@@ -1815,7 +3371,7 @@ _mm_cvtsi128_si32(__m128i __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
///
/// \param __a
/// A vector of [2 x i64]. The least significant 64 bits are moved to the
@@ -1833,7 +3389,7 @@ _mm_cvtsi128_si64(__m128i __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVDQA / MOVDQA instruction.
+/// This intrinsic corresponds to the <c> VMOVDQA / MOVDQA </c> instruction.
///
/// \param __p
/// An aligned pointer to a memory location containing integer values.
@@ -1849,7 +3405,7 @@ _mm_load_si128(__m128i const *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVDQU / MOVDQU instruction.
+/// This intrinsic corresponds to the <c> VMOVDQU / MOVDQU </c> instruction.
///
/// \param __p
/// A pointer to a memory location containing integer values.
@@ -1868,7 +3424,7 @@ _mm_loadu_si128(__m128i const *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVQ / MOVQ instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
///
/// \param __p
/// A 128-bit vector of [2 x i64]. Bits [63:0] are written to bits [63:0] of
@@ -2154,42 +3710,170 @@ _mm_set1_epi8(char __b)
return (__m128i)(__v16qi){ __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b };
}
+/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// with the specified 64-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKLQDQ / PUNPCKLQDQ </c>
+/// instruction.
+///
+/// \param __q0
+/// A 64-bit integral value used to initialize the lower 64 bits of the
+/// result.
+/// \param __q1
+/// A 64-bit integral value used to initialize the upper 64 bits of the
+/// result.
+/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi64(__m64 __q0, __m64 __q1)
{
return (__m128i){ (long long)__q0, (long long)__q1 };
}
+/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// with the specified 32-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __i0
+/// A 32-bit integral value used to initialize bits [31:0] of the result.
+/// \param __i1
+/// A 32-bit integral value used to initialize bits [63:32] of the result.
+/// \param __i2
+/// A 32-bit integral value used to initialize bits [95:64] of the result.
+/// \param __i3
+/// A 32-bit integral value used to initialize bits [127:96] of the result.
+/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
{
return (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
}
+/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// with the specified 16-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __w0
+/// A 16-bit integral value used to initialize bits [15:0] of the result.
+/// \param __w1
+/// A 16-bit integral value used to initialize bits [31:16] of the result.
+/// \param __w2
+/// A 16-bit integral value used to initialize bits [47:32] of the result.
+/// \param __w3
+/// A 16-bit integral value used to initialize bits [63:48] of the result.
+/// \param __w4
+/// A 16-bit integral value used to initialize bits [79:64] of the result.
+/// \param __w5
+/// A 16-bit integral value used to initialize bits [95:80] of the result.
+/// \param __w6
+/// A 16-bit integral value used to initialize bits [111:96] of the result.
+/// \param __w7
+/// A 16-bit integral value used to initialize bits [127:112] of the result.
+/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
{
return (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
}
+/// \brief Constructs a 128-bit integer vector, initialized in reverse order
+/// with the specified 8-bit integral values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic is a utility function and does not correspond to a specific
+/// instruction.
+///
+/// \param __b0
+/// An 8-bit integral value used to initialize bits [7:0] of the result.
+/// \param __b1
+/// An 8-bit integral value used to initialize bits [15:8] of the result.
+/// \param __b2
+/// An 8-bit integral value used to initialize bits [23:16] of the result.
+/// \param __b3
+/// An 8-bit integral value used to initialize bits [31:24] of the result.
+/// \param __b4
+/// An 8-bit integral value used to initialize bits [39:32] of the result.
+/// \param __b5
+/// An 8-bit integral value used to initialize bits [47:40] of the result.
+/// \param __b6
+/// An 8-bit integral value used to initialize bits [55:48] of the result.
+/// \param __b7
+/// An 8-bit integral value used to initialize bits [63:56] of the result.
+/// \param __b8
+/// An 8-bit integral value used to initialize bits [71:64] of the result.
+/// \param __b9
+/// An 8-bit integral value used to initialize bits [79:72] of the result.
+/// \param __b10
+/// An 8-bit integral value used to initialize bits [87:80] of the result.
+/// \param __b11
+/// An 8-bit integral value used to initialize bits [95:88] of the result.
+/// \param __b12
+/// An 8-bit integral value used to initialize bits [103:96] of the result.
+/// \param __b13
+/// An 8-bit integral value used to initialize bits [111:104] of the result.
+/// \param __b14
+/// An 8-bit integral value used to initialize bits [119:112] of the result.
+/// \param __b15
+/// An 8-bit integral value used to initialize bits [127:120] of the result.
+/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
{
return (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
}
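Since the _mm_setr_* arguments are given in memory order, the result matches an unaligned load of the same byte sequence. An illustrative sketch:

#include <emmintrin.h>

static __m128i setr_example(void) {
  const char bytes[16] = { 0, 1, 2, 3, 4, 5, 6, 7,
                           8, 9, 10, 11, 12, 13, 14, 15 };
  __m128i a = _mm_loadu_si128((const __m128i *)bytes);
  __m128i b = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                            8, 9, 10, 11, 12, 13, 14, 15);
  return _mm_cmpeq_epi8(a, b);  /* all lanes 0xFF: a and b are identical */
}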
+/// \brief Creates a 128-bit integer vector initialized to zero.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
+///
+/// \returns An initialized 128-bit integer vector with all elements set to
+/// zero.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_setzero_si128(void)
{
return (__m128i){ 0LL, 0LL };
}
+/// \brief Stores a 128-bit integer vector to a memory location aligned on a
+/// 128-bit boundary.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
+///
+/// \param __p
+/// A pointer to an aligned memory location that will receive the integer
+/// values.
+/// \param __b
+/// A 128-bit integer vector containing the values to be moved.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store_si128(__m128i *__p, __m128i __b)
{
*__p = __b;
}
+/// \brief Stores a 128-bit integer vector to an unaligned memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
+///
+/// \param __p
+/// A pointer to a memory location that will receive the integer values.
+/// \param __b
+/// A 128-bit integer vector containing the values to be moved.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storeu_si128(__m128i *__p, __m128i __b)
{
@@ -2199,12 +3883,45 @@ _mm_storeu_si128(__m128i *__p, __m128i __b)
((struct __storeu_si128*)__p)->__v = __b;
}
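A sketch of the alignment contract for the two stores above: _mm_store_si128 requires a 16-byte-aligned destination, while _mm_storeu_si128 accepts any address (the names below are illustrative):

    #include <emmintrin.h>
    #include <stdint.h>

    void store_example(void) {
      _Alignas(16) int32_t aligned[4];
      int32_t buf[5];
      __m128i v = _mm_set1_epi32(42);

      _mm_store_si128((__m128i *)aligned, v);    /* requires 16-byte alignment */
      _mm_storeu_si128((__m128i *)(buf + 1), v); /* tolerates any alignment */
    }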
+/// \brief Moves bytes selected by the mask from the first operand to the
+/// specified unaligned memory location. When a mask bit is 1, the
+/// corresponding byte is written, otherwise it is not written. To minimize
+/// caching, the data is flagged as non-temporal (unlikely to be used again
+/// soon). Exception and trap behavior for elements not selected for storage
+/// to memory is implementation dependent.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMASKMOVDQU / MASKMOVDQU </c>
+/// instruction.
+///
+/// \param __d
+/// A 128-bit integer vector containing the values to be moved.
+/// \param __n
+/// A 128-bit integer vector containing the mask. The most significant bit of
+/// each byte represents the mask bits.
+/// \param __p
+/// A pointer to an unaligned 128-bit memory location where the specified
+/// values are moved.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
{
__builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
}
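A sketch of the mask convention: only bytes whose mask byte has the most significant bit set are written, and the remaining destination bytes are left untouched.

    #include <emmintrin.h>
    #include <string.h>

    void maskmove_example(char dst[16]) {
      __m128i data = _mm_set1_epi8('X');
      /* -128 == 0x80: the MSB selects a byte for writing. */
      __m128i mask = _mm_setr_epi8(-128, 0, -128, 0, -128, 0, -128, 0,
                                   -128, 0, -128, 0, -128, 0, -128, 0);
      memset(dst, '.', 16);
      _mm_maskmoveu_si128(data, mask, dst);
      /* dst now holds "X.X.X.X.X.X.X.X.": even bytes written, odd kept. */
    }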
+/// \brief Stores the lower 64 bits of a 128-bit integer vector of [2 x i64] to
+/// a memory location.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+///
+/// \param __p
+/// A pointer to a 64-bit memory location that will receive the lower 64 bits
+/// of the integer vector parameter.
+/// \param __a
+/// A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
+/// value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_storel_epi64(__m128i *__p, __m128i __a)
{
@@ -2214,18 +3931,54 @@ _mm_storel_epi64(__m128i *__p, __m128i __a)
((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
}
+/// \brief Stores a 128-bit floating point vector of [2 x double] to a 128-bit
+/// aligned memory location. To minimize caching, the data is flagged as
+/// non-temporal (unlikely to be used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVNTPD / MOVNTPD </c> instruction.
+///
+/// \param __p
+/// A pointer to the 128-bit aligned memory location used to store the value.
+/// \param __a
+/// A vector of [2 x double] containing the 64-bit values to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_stream_pd(double *__p, __m128d __a)
{
__builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
}
+/// \brief Stores a 128-bit integer vector to a 128-bit aligned memory location.
+/// To minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVNTDQ / MOVNTDQ </c> instruction.
+///
+/// \param __p
+/// A pointer to the 128-bit aligned memory location used to store the value.
+/// \param __a
+/// A 128-bit integer vector containing the values to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_stream_si128(__m128i *__p, __m128i __a)
{
__builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
}
+/// \brief Stores a 32-bit integer value in the specified memory location. To
+/// minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> MOVNTI </c> instruction.
+///
+/// \param __p
+/// A pointer to the 32-bit memory location used to store the value.
+/// \param __a
+/// A 32-bit integer containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_stream_si32(int *__p, int __a)
{
@@ -2233,6 +3986,18 @@ _mm_stream_si32(int *__p, int __a)
}
#ifdef __x86_64__
+/// \brief Stores a 64-bit integer value in the specified memory location. To
+/// minimize caching, the data is flagged as non-temporal (unlikely to be
+/// used again soon).
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> MOVNTIQ </c> instruction.
+///
+/// \param __p
+/// A pointer to the 64-bit memory location used to store the value.
+/// \param __a
+/// A 64-bit integer containing the value to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_stream_si64(long long *__p, long long __a)
{
@@ -2240,42 +4005,154 @@ _mm_stream_si64(long long *__p, long long __a)
}
#endif
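Since streamed data bypasses the cache, the usual pattern is a bulk-fill loop followed by a store fence before the buffer is handed to a reader. A minimal sketch (the caller must supply a 16-byte-aligned buffer; _mm_sfence is the SSE store fence pulled in via <emmintrin.h>):

    #include <emmintrin.h>
    #include <stddef.h>

    /* Zero-fill n vectors without polluting the cache. */
    void stream_fill(__m128i *buf, size_t n) {
      __m128i zero = _mm_setzero_si128();
      for (size_t i = 0; i < n; ++i)
        _mm_stream_si128(buf + i, zero);
      _mm_sfence(); /* order the non-temporal stores before later accesses */
    }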
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_clflush(void const *__p)
-{
- __builtin_ia32_clflush(__p);
-}
+#if defined(__cplusplus)
+extern "C" {
+#endif
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_lfence(void)
-{
- __builtin_ia32_lfence();
-}
+/// \brief Flushes and invalidates the cache line that contains \a __p from
+/// all caches in the coherency domain.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> CLFLUSH </c> instruction.
+///
+/// \param __p
+/// A pointer to the memory location used to identify the cache line to be
+/// flushed.
+void _mm_clflush(void const *);
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_mfence(void)
-{
- __builtin_ia32_mfence();
-}
+/// \brief Forces strong memory ordering (serialization) between load
+/// instructions preceding this instruction and load instructions following
+/// this instruction, ensuring the system completes all previous loads before
+/// executing subsequent loads.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> LFENCE </c> instruction.
+///
+void _mm_lfence(void);
+/// \brief Forces strong memory ordering (serialization) between load and store
+/// instructions preceding this instruction and load and store instructions
+/// following this instruction, ensuring that the system completes all
+/// previous memory accesses before executing subsequent memory accesses.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> MFENCE </c> instruction.
+///
+void _mm_mfence(void);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
+
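A sketch of where _mm_mfence fits: publishing data through a flag, where the fence keeps every prior access ordered before the flag store. This matters in particular when the payload was written with the non-temporal stores above; a production version would also make the flag a proper atomic.

    #include <emmintrin.h>

    int payload;
    volatile int ready;

    void publish(int value) {
      payload = value;
      _mm_mfence(); /* complete all earlier loads/stores before the flag store */
      ready = 1;
    }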
+/// \brief Converts 16-bit signed integers from both 128-bit integer vector
+/// operands into 8-bit signed integers, and packs the results into the
+/// destination. Positive values greater than 0x7F are saturated to 0x7F.
+/// Negative values less than 0x80 are saturated to 0x80.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPACKSSWB / PACKSSWB </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
+/// a signed integer and is converted to an 8-bit signed integer with
+/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less
+/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
+/// written to the lower 64 bits of the result.
+/// \param __b
+/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
+/// a signed integer and is converted to an 8-bit signed integer with
+/// saturation. Values greater than 0x7F are saturated to 0x7F. Values less
+/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
+/// written to the higher 64 bits of the result.
+/// \returns A 128-bit vector of [16 x i8] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_packs_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
}
+/// \brief Converts 32-bit signed integers from both 128-bit integer vector
+/// operands into 16-bit signed integers, and packs the results into the
+/// destination. Positive values greater than 0x7FFF are saturated to 0x7FFF.
+/// Negative values less than 0x8000 are saturated to 0x8000.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPACKSSDW / PACKSSDW </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
+/// a signed integer and is converted to a 16-bit signed integer with
+/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
+/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
+/// are written to the lower 64 bits of the result.
+/// \param __b
+/// A 128-bit integer vector of [4 x i32]. Each 32-bit element is treated as
+/// a signed integer and is converted to a 16-bit signed integer with
+/// saturation. Values greater than 0x7FFF are saturated to 0x7FFF. Values
+/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
+/// are written to the higher 64 bits of the result.
+/// \returns A 128-bit vector of [8 x i16] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_packs_epi32(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
}
+/// \brief Converts 16-bit signed integers from both 128-bit integer vector
+/// operands into 8-bit unsigned integers, and packs the results into the
+/// destination. Values greater than 0xFF are saturated to 0xFF. Values less
+/// than 0x00 are saturated to 0x00.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPACKUSWB / PACKUSWB </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
+/// a signed integer and is converted to an 8-bit unsigned integer with
+/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
+/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
+/// written to the lower 64 bits of the result.
+/// \param __b
+/// A 128-bit integer vector of [8 x i16]. Each 16-bit element is treated as
+/// a signed integer and is converted to an 8-bit unsigned integer with
+/// saturation. Values greater than 0xFF are saturated to 0xFF. Values less
+/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
+/// written to the higher 64 bits of the result.
+/// \returns A 128-bit vector of [16 x i8] containing the converted values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_packus_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
}
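A worked example of the signed-to-unsigned saturation performed by _mm_packus_epi16: inputs below 0 clamp to 0x00 and inputs above 255 clamp to 0xFF.

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
      __m128i a = _mm_setr_epi16(-1, 0, 100, 300, 32767, -32768, 255, 256);
      __m128i r = _mm_packus_epi16(a, a);
      unsigned char out[16];
      _mm_storeu_si128((__m128i *)out, r);
      for (int i = 0; i < 8; ++i)
        printf("%u ", out[i]); /* prints: 0 0 100 255 255 0 255 255 */
      return 0;
    }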
+/// \brief Extracts 16 bits from a 128-bit integer vector of [8 x i16], using
+/// the immediate-value parameter as a selector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \param __imm
+/// An immediate value. Bits [2:0] select values from \a __a to be assigned
+/// to bits [15:0] of the result. \n
+/// 000: assign values from bits [15:0] of \a __a. \n
+/// 001: assign values from bits [31:16] of \a __a. \n
+/// 010: assign values from bits [47:32] of \a __a. \n
+/// 011: assign values from bits [63:48] of \a __a. \n
+/// 100: assign values from bits [79:64] of \a __a. \n
+/// 101: assign values from bits [95:80] of \a __a. \n
+/// 110: assign values from bits [111:96] of \a __a. \n
+/// 111: assign values from bits [127:112] of \a __a.
+/// \returns An integer, whose lower 16 bits are selected from the 128-bit
+/// integer vector parameter and the remaining bits are assigned zeros.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_extract_epi16(__m128i __a, int __imm)
{
@@ -2283,6 +4160,26 @@ _mm_extract_epi16(__m128i __a, int __imm)
return (unsigned short)__b[__imm & 7];
}
+/// \brief Constructs a 128-bit integer vector by first making a copy of the
+/// 128-bit integer vector parameter, and then inserting the lower 16 bits
+/// of an integer parameter into an offset specified by the immediate-value
+/// parameter.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector of [8 x i16]. This vector is copied to the
+/// result and then one of the eight elements in the result is replaced by
+/// the lower 16 bits of \a __b.
+/// \param __b
+/// An integer. The lower 16 bits of this parameter are written to the
+/// result beginning at an offset specified by \a __imm.
+/// \param __imm
+/// An immediate value specifying which of the eight 16-bit elements of the
+/// result is replaced by the lower 16 bits of \a __b.
+/// \returns A 128-bit integer vector containing the constructed values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_insert_epi16(__m128i __a, int __b, int __imm)
{
@@ -2291,18 +4188,85 @@ _mm_insert_epi16(__m128i __a, int __b, int __imm)
return (__m128i)__c;
}
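Extract and insert are complementary: the immediate in both cases is a lane index in the range 0-7, as the masking with 7 in the bodies above shows. A short sketch:

    #include <emmintrin.h>

    int extract_insert_example(void) {
      __m128i v = _mm_setr_epi16(10, 11, 12, 13, 14, 15, 16, 17);
      int w = _mm_extract_epi16(v, 3);    /* reads lane 3: w == 13 */
      v = _mm_insert_epi16(v, 99, 3);     /* lane 3 becomes 99 */
      return w + _mm_extract_epi16(v, 3); /* 13 + 99 == 112 */
    }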
+/// \brief Copies the values of the most significant bits from each 8-bit
+/// element in a 128-bit integer vector of [16 x i8] to create a 16-bit mask
+/// value, zero-extends the value, and writes it to the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPMOVMSKB / PMOVMSKB </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector containing the values with bits to be extracted.
+/// \returns The most significant bits from each 8-bit element in \a __a,
+/// written to bits [15:0]. The other bits are assigned zeros.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_movemask_epi8(__m128i __a)
{
return __builtin_ia32_pmovmskb128((__v16qi)__a);
}
+/// \brief Constructs a 128-bit integer vector by shuffling four 32-bit
+/// elements of a 128-bit integer vector parameter, using the immediate-value
+/// parameter as a specifier.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_shuffle_epi32(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VPSHUFD / PSHUFD </c> instruction.
+///
+/// \param a
+/// A 128-bit integer vector containing the values to be copied.
+/// \param imm
+/// An 8-bit immediate value specifying which elements to copy from \a a.
+/// The destinations within the 128-bit result are assigned values as
+/// follows: \n
+/// Bits [1:0] are used to assign values to bits [31:0] of the result. \n
+/// Bits [3:2] are used to assign values to bits [63:32] of the result. \n
+/// Bits [5:4] are used to assign values to bits [95:64] of the result. \n
+/// Bits [7:6] are used to assign values to bits [127:96] of the result. \n
+/// Bit value assignments: \n
+/// 00: assign values from bits [31:0] of \a a. \n
+/// 01: assign values from bits [63:32] of \a a. \n
+/// 10: assign values from bits [95:64] of \a a. \n
+/// 11: assign values from bits [127:96] of \a a.
+/// \returns A 128-bit integer vector containing the shuffled values.
#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector((__v4si)(__m128i)(a), \
(__v4si)_mm_undefined_si128(), \
((imm) >> 0) & 0x3, ((imm) >> 2) & 0x3, \
((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); })
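Reading the two-bit selectors in practice: imm = 0x1B encodes the selectors 0, 1, 2, 3 from the high pair down, which reverses the four elements. A sketch:

    #include <emmintrin.h>

    __m128i reverse_epi32(__m128i a) {
      /* 0x1B = (0 << 6) | (1 << 4) | (2 << 2) | 3: elements 3,2,1,0. */
      return _mm_shuffle_epi32(a, 0x1B);
    }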
+/// \brief Constructs a 128-bit integer vector by shuffling four lower 16-bit
+/// elements of a 128-bit integer vector of [8 x i16], using the immediate
+/// value parameter as a specifier.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_shufflelo_epi16(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VPSHUFLW / PSHUFLW </c> instruction.
+///
+/// \param a
+/// A 128-bit integer vector of [8 x i16]. Bits [127:64] are copied to bits
+/// [127:64] of the result.
+/// \param imm
+/// An 8-bit immediate value specifying which elements to copy from \a a. \n
+/// Bits [1:0] are used to assign values to bits [15:0] of the result. \n
+/// Bits [3:2] are used to assign values to bits [31:16] of the result. \n
+/// Bits [5:4] are used to assign values to bits [47:32] of the result. \n
+/// Bits [7:6] are used to assign values to bits [63:48] of the result. \n
+/// Bit value assignments: \n
+/// 00: assign values from bits [15:0] of \a a. \n
+/// 01: assign values from bits [31:16] of \a a. \n
+/// 10: assign values from bits [47:32] of \a a. \n
+/// 11: assign values from bits [63:48] of \a a. \n
+/// \returns A 128-bit integer vector containing the shuffled values.
#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
(__v8hi)_mm_undefined_si128(), \
@@ -2310,6 +4274,33 @@ _mm_movemask_epi8(__m128i __a)
((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3, \
4, 5, 6, 7); })
+/// \brief Constructs a 128-bit integer vector by shuffling four upper 16-bit
+/// elements of a 128-bit integer vector of [8 x i16], using the immediate
+/// value parameter as a specifier.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm_shufflehi_epi16(__m128i a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VPSHUFHW / PSHUFHW </c> instruction.
+///
+/// \param a
+/// A 128-bit integer vector of [8 x i16]. Bits [63:0] are copied to bits
+/// [63:0] of the result.
+/// \param imm
+/// An 8-bit immediate value specifying which elements to copy from \a a. \n
+/// Bits [1:0] are used to assign values to bits [79:64] of the result. \n
+/// Bits [3:2] are used to assign values to bits [95:80] of the result. \n
+/// Bits [5:4] are used to assign values to bits [111:96] of the result. \n
+/// Bits [7:6] are used to assign values to bits [127:112] of the result. \n
+/// Bit value assignments: \n
+/// 00: assign values from bits [79:64] of \a a. \n
+/// 01: assign values from bits [95:80] of \a a. \n
+/// 10: assign values from bits [111:96] of \a a. \n
+/// 11: assign values from bits [127:112] of \a a. \n
+/// \returns A 128-bit integer vector containing the shuffled values.
#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
(__m128i)__builtin_shufflevector((__v8hi)(__m128i)(a), \
(__v8hi)_mm_undefined_si128(), \
@@ -2319,137 +4310,480 @@ _mm_movemask_epi8(__m128i __a)
4 + (((imm) >> 4) & 0x3), \
4 + (((imm) >> 6) & 0x3)); })
+/// \brief Unpacks the high-order (index 8-15) values from two 128-bit vectors
+/// of [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKHBW / PUNPCKHBW </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [16 x i8]. \n
+/// Bits [71:64] are written to bits [7:0] of the result. \n
+/// Bits [79:72] are written to bits [23:16] of the result. \n
+/// Bits [87:80] are written to bits [39:32] of the result. \n
+/// Bits [95:88] are written to bits [55:48] of the result. \n
+/// Bits [103:96] are written to bits [71:64] of the result. \n
+/// Bits [111:104] are written to bits [87:80] of the result. \n
+/// Bits [119:112] are written to bits [103:96] of the result. \n
+/// Bits [127:120] are written to bits [119:112] of the result.
+/// \param __b
+/// A 128-bit vector of [16 x i8]. \n
+/// Bits [71:64] are written to bits [15:8] of the result. \n
+/// Bits [79:72] are written to bits [31:24] of the result. \n
+/// Bits [87:80] are written to bits [47:40] of the result. \n
+/// Bits [95:88] are written to bits [63:56] of the result. \n
+/// Bits [103:96] are written to bits [79:72] of the result. \n
+/// Bits [111:104] are written to bits [95:88] of the result. \n
+/// Bits [119:112] are written to bits [111:104] of the result. \n
+/// Bits [127:120] are written to bits [127:120] of the result.
+/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpackhi_epi8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
+/// \brief Unpacks the high-order (index 4-7) values from two 128-bit vectors of
+/// [8 x i16] and interleaves them into a 128-bit vector of [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKHWD / PUNPCKHWD </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16]. \n
+/// Bits [79:64] are written to bits [15:0] of the result. \n
+/// Bits [95:80] are written to bits [47:32] of the result. \n
+/// Bits [111:96] are written to bits [79:64] of the result. \n
+/// Bits [127:112] are written to bits [111:96] of the result.
+/// \param __b
+/// A 128-bit vector of [8 x i16]. \n
+/// Bits [79:64] are written to bits [31:16] of the result. \n
+/// Bits [95:80] are written to bits [63:48] of the result. \n
+/// Bits [111:96] are written to bits [95:80] of the result. \n
+/// Bits [127:112] are written to bits [127:112] of the result.
+/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpackhi_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
}
+/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of
+/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKHDQ / PUNPCKHDQ </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32]. \n
+/// Bits [95:64] are written to bits [31:0] of the destination. \n
+/// Bits [127:96] are written to bits [95:64] of the destination.
+/// \param __b
+/// A 128-bit vector of [4 x i32]. \n
+/// Bits [95:64] are written to bits [63:32] of the destination. \n
+/// Bits [127:96] are written to bits [127:96] of the destination.
+/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpackhi_epi32(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
}
+/// \brief Unpacks the high-order (odd-indexed) values from two 128-bit vectors
+/// of [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKHQDQ / PUNPCKHQDQ </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x i64]. \n
+/// Bits [127:64] are written to bits [63:0] of the destination.
+/// \param __b
+/// A 128-bit vector of [2 x i64]. \n
+/// Bits [127:64] are written to bits [127:64] of the destination.
+/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpackhi_epi64(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
}
+/// \brief Unpacks the low-order (index 0-7) values from two 128-bit vectors of
+/// [16 x i8] and interleaves them into a 128-bit vector of [16 x i8].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKLBW / PUNPCKLBW </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [16 x i8]. \n
+/// Bits [7:0] are written to bits [7:0] of the result. \n
+/// Bits [15:8] are written to bits [23:16] of the result. \n
+/// Bits [23:16] are written to bits [39:32] of the result. \n
+/// Bits [31:24] are written to bits [55:48] of the result. \n
+/// Bits [39:32] are written to bits [71:64] of the result. \n
+/// Bits [47:40] are written to bits [87:80] of the result. \n
+/// Bits [55:48] are written to bits [103:96] of the result. \n
+/// Bits [63:56] are written to bits [119:112] of the result.
+/// \param __b
+/// A 128-bit vector of [16 x i8]. \n
+/// Bits [7:0] are written to bits [15:8] of the result. \n
+/// Bits [15:8] are written to bits [31:24] of the result. \n
+/// Bits [23:16] are written to bits [47:40] of the result. \n
+/// Bits [31:24] are written to bits [63:56] of the result. \n
+/// Bits [39:32] are written to bits [79:72] of the result. \n
+/// Bits [47:40] are written to bits [95:88] of the result. \n
+/// Bits [55:48] are written to bits [111:104] of the result. \n
+/// Bits [63:56] are written to bits [127:120] of the result.
+/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpacklo_epi8(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
}
+/// \brief Unpacks the low-order (index 0-3) values from each of the two 128-bit
+/// vectors of [8 x i16] and interleaves them into a 128-bit vector of
+/// [8 x i16].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKLWD / PUNPCKLWD </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [8 x i16]. \n
+/// Bits [15:0] are written to bits [15:0] of the result. \n
+/// Bits [31:16] are written to bits [47:32] of the result. \n
+/// Bits [47:32] are written to bits [79:64] of the result. \n
+/// Bits [63:48] are written to bits [111:96] of the result.
+/// \param __b
+/// A 128-bit vector of [8 x i16]. \n
+/// Bits [15:0] are written to bits [31:16] of the result. \n
+/// Bits [31:16] are written to bits [63:48] of the result. \n
+/// Bits [47:32] are written to bits [95:80] of the result. \n
+/// Bits [63:48] are written to bits [127:112] of the result.
+/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpacklo_epi16(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
}
+/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of
+/// [4 x i32] and interleaves them into a 128-bit vector of [4 x i32].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKLDQ / PUNPCKLDQ </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [4 x i32]. \n
+/// Bits [31:0] are written to bits [31:0] of the destination. \n
+/// Bits [63:32] are written to bits [95:64] of the destination.
+/// \param __b
+/// A 128-bit vector of [4 x i32]. \n
+/// Bits [31:0] are written to bits [63:32] of the destination. \n
+/// Bits [63:32] are written to bits [127:96] of the destination.
+/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpacklo_epi32(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
}
+/// \brief Unpacks the low-order 64-bit elements from two 128-bit vectors of
+/// [2 x i64] and interleaves them into a 128-bit vector of [2 x i64].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VPUNPCKLQDQ / PUNPCKLQDQ </c>
+/// instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x i64]. \n
+/// Bits [63:0] are written to bits [63:0] of the destination. \n
+/// \param __b
+/// A 128-bit vector of [2 x i64]. \n
+/// Bits [63:0] are written to bits [127:64] of the destination. \n
+/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_unpacklo_epi64(__m128i __a, __m128i __b)
{
return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
}
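One common use of the unpack family is zero extension: interleaving with a zero vector widens unsigned 8-bit elements into 16-bit lanes, the low half via _mm_unpacklo_epi8 and the high half via _mm_unpackhi_epi8. A hedged sketch:

    #include <emmintrin.h>

    /* Widen 16 unsigned bytes into two vectors of eight 16-bit values. */
    void widen_u8_to_u16(__m128i bytes, __m128i *lo, __m128i *hi) {
      __m128i zero = _mm_setzero_si128();
      *lo = _mm_unpacklo_epi8(bytes, zero); /* bytes 0-7  -> lanes 0-7 */
      *hi = _mm_unpackhi_epi8(bytes, zero); /* bytes 8-15 -> lanes 0-7 */
    }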
+/// \brief Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
+/// integer.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit integer vector operand. The lower 64 bits are moved to the
+/// destination.
+/// \returns A 64-bit integer containing the lower 64 bits of the parameter.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_movepi64_pi64(__m128i __a)
{
return (__m64)__a[0];
}
+/// \brief Moves the 64-bit operand to a 128-bit integer vector, zeroing the
+/// upper bits.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ / MOVD </c> instruction.
+///
+/// \param __a
+/// A 64-bit value.
+/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
+/// the operand. The upper 64 bits are assigned zeros.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_movpi64_epi64(__m64 __a)
{
return (__m128i){ (long long)__a, 0 };
}
+/// \brief Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
+/// integer vector, zeroing the upper bits.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+///
+/// \param __a
+/// A 128-bit integer vector operand. The lower 64 bits are moved to the
+/// destination.
+/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
+/// the operand. The upper 64 bits are assigned zeros.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_move_epi64(__m128i __a)
{
return __builtin_shufflevector((__v2di)__a, (__m128i){ 0 }, 0, 2);
}
+/// \brief Unpacks the high-order (odd-indexed) values from two 128-bit vectors
+/// of [2 x double] and interleaves them into a 128-bit vector of [2 x
+/// double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. \n
+/// Bits [127:64] are written to bits [63:0] of the destination.
+/// \param __b
+/// A 128-bit vector of [2 x double]. \n
+/// Bits [127:64] are written to bits [127:64] of the destination.
+/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_unpackhi_pd(__m128d __a, __m128d __b)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
}
+/// \brief Unpacks the low-order (even-indexed) values from two 128-bit vectors
+/// of [2 x double] and interleaves them into a 128-bit vector of [2 x
+/// double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double]. \n
+/// Bits [63:0] are written to bits [63:0] of the destination.
+/// \param __b
+/// A 128-bit vector of [2 x double]. \n
+/// Bits [63:0] are written to bits [127:64] of the destination.
+/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_unpacklo_pd(__m128d __a, __m128d __b)
{
return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
}
+/// \brief Extracts the sign bits of the double-precision values in the 128-bit
+/// vector of [2 x double], zero-extends the value, and writes it to the
+/// low-order bits of the destination.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VMOVMSKPD / MOVMSKPD </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector of [2 x double] containing the values with sign bits to
+/// be extracted.
+/// \returns The sign bits from each of the double-precision elements in \a __a,
+/// written to bits [1:0]. The remaining bits are assigned values of zero.
static __inline__ int __DEFAULT_FN_ATTRS
_mm_movemask_pd(__m128d __a)
{
return __builtin_ia32_movmskpd((__v2df)__a);
}
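A sketch of a typical movemask use: testing whether any element is negative without a per-lane branch (note the sign bit is also set for -0.0 and negative NaNs):

    #include <emmintrin.h>

    int any_negative(__m128d v) {
      /* Bit 0 = sign of the lower element, bit 1 = sign of the upper. */
      return _mm_movemask_pd(v) != 0;
    }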
+
+/// \brief Constructs a 128-bit floating-point vector of [2 x double] from two
+/// 128-bit vector parameters of [2 x double], using the immediate-value
+/// parameter as a specifier.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128d _mm_shuffle_pd(__m128d a, __m128d b, const int i);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VSHUFPD / SHUFPD </c> instruction.
+///
+/// \param a
+/// A 128-bit vector of [2 x double].
+/// \param b
+/// A 128-bit vector of [2 x double].
+/// \param i
+/// An 8-bit immediate value. The least significant two bits specify which
+/// elements to copy from \a a and \a b: \n
+/// Bit[0] = 0: lower element of \a a copied to lower element of result. \n
+/// Bit[0] = 1: upper element of \a a copied to lower element of result. \n
+/// Bit[1] = 0: lower element of \a b copied to upper element of result. \n
+/// Bit[1] = 1: upper element of \a b copied to upper element of result. \n
+/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
(__m128d)__builtin_shufflevector((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
0 + (((i) >> 0) & 0x1), \
2 + (((i) >> 1) & 0x1)); })
+/// \brief Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
+/// floating-point vector of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [2 x double].
+/// \returns A 128-bit floating-point vector of [4 x float] containing the same
+/// bitwise pattern as the parameter.
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_castpd_ps(__m128d __a)
{
return (__m128)__a;
}
+/// \brief Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
+/// integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [2 x double].
+/// \returns A 128-bit integer vector containing the same bitwise pattern as the
+/// parameter.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_castpd_si128(__m128d __a)
{
return (__m128i)__a;
}
+/// \brief Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
+/// floating-point vector of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float].
+/// \returns A 128-bit floating-point vector of [2 x double] containing the same
+/// bitwise pattern as the parameter.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_castps_pd(__m128 __a)
{
return (__m128d)__a;
}
+/// \brief Casts a 128-bit floating-point vector of [4 x float] into a 128-bit
+/// integer vector.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit floating-point vector of [4 x float].
+/// \returns A 128-bit integer vector containing the same bitwise pattern as the
+/// parameter.
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_castps_si128(__m128 __a)
{
return (__m128i)__a;
}
+/// \brief Casts a 128-bit integer vector into a 128-bit floating-point vector
+/// of [4 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \returns A 128-bit floating-point vector of [4 x float] containing the same
+/// bitwise pattern as the parameter.
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_castsi128_ps(__m128i __a)
{
return (__m128)__a;
}
+/// \brief Casts a 128-bit integer vector into a 128-bit floating-point vector
+/// of [2 x double].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic has no corresponding instruction.
+///
+/// \param __a
+/// A 128-bit integer vector.
+/// \returns A 128-bit floating-point vector of [2 x double] containing the same
+/// bitwise pattern as the parameter.
static __inline__ __m128d __DEFAULT_FN_ATTRS
_mm_castsi128_pd(__m128i __a)
{
return (__m128d)__a;
}
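The casts above are free bit-level reinterpretations, not value conversions; a round trip returns the original bit pattern. A sketch contrasting a cast with an actual conversion:

    #include <emmintrin.h>

    void cast_vs_convert(void) {
      __m128i ones = _mm_set1_epi32(1);
      __m128 bits = _mm_castsi128_ps(ones); /* same bits: tiny denormals */
      __m128 vals = _mm_cvtepi32_ps(ones);  /* converted: 1.0f per lane  */
      (void)bits; (void)vals;
    }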
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_pause(void)
-{
- __builtin_ia32_pause();
-}
+#if defined(__cplusplus)
+extern "C" {
+#endif
+/// \brief Indicates that a spin loop is being executed for the purposes of
+/// optimizing power consumption during the loop.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> PAUSE </c> instruction.
+///
+void _mm_pause(void);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
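The intended home for _mm_pause is the body of a spin-wait loop. A minimal sketch (a production version would bound the spin and fall back to blocking):

    #include <emmintrin.h>

    void spin_wait(volatile int *flag) {
      while (!*flag)
        _mm_pause(); /* reduce power and pipeline flushes while spinning */
    }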
#undef __DEFAULT_FN_ATTRS
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
diff --git a/lib/Headers/f16cintrin.h b/lib/Headers/f16cintrin.h
index 415bf732fb9f..180712ffc680 100644
--- a/lib/Headers/f16cintrin.h
+++ b/lib/Headers/f16cintrin.h
@@ -37,7 +37,7 @@
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTPH2PS instruction.
+/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
///
/// \param __a
/// A 16-bit half-precision float value.
@@ -59,17 +59,17 @@ _cvtsh_ss(unsigned short __a)
/// unsigned short _cvtss_sh(float a, const int imm);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCVTPS2PH instruction.
+/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
///
/// \param a
/// A 32-bit single-precision float value to be converted to a 16-bit
/// half-precision float value.
/// \param imm
-/// An immediate value controlling rounding using bits [2:0]:
-/// 000: Nearest
-/// 001: Down
-/// 010: Up
-/// 011: Truncate
+/// An immediate value controlling rounding using bits [2:0]: \n
+/// 000: Nearest \n
+/// 001: Down \n
+/// 010: Up \n
+/// 011: Truncate \n
/// 1XX: Use MXCSR.RC for rounding
/// \returns The converted 16-bit half-precision float value.
#define _cvtss_sh(a, imm) \
@@ -85,16 +85,16 @@ _cvtsh_ss(unsigned short __a)
/// __m128i _mm_cvtps_ph(__m128 a, const int imm);
/// \endcode
///
-/// This intrinsic corresponds to the \c VCVTPS2PH instruction.
+/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
///
/// \param a
/// A 128-bit vector containing 32-bit float values.
/// \param imm
-/// An immediate value controlling rounding using bits [2:0]:
-/// 000: Nearest
-/// 001: Down
-/// 010: Up
-/// 011: Truncate
+/// An immediate value controlling rounding using bits [2:0]: \n
+/// 000: Nearest \n
+/// 001: Down \n
+/// 010: Up \n
+/// 011: Truncate \n
/// 1XX: Use MXCSR.RC for rounding
/// \returns A 128-bit vector containing converted 16-bit half-precision float
/// values. The lower 64 bits are used to store the converted 16-bit
@@ -107,7 +107,7 @@ _cvtsh_ss(unsigned short __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTPH2PS instruction.
+/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
///
/// \param __a
/// A 128-bit vector containing 16-bit half-precision float values. The lower
diff --git a/lib/Headers/float.h b/lib/Headers/float.h
index a28269ebebbe..0f453d87cbcb 100644
--- a/lib/Headers/float.h
+++ b/lib/Headers/float.h
@@ -27,9 +27,12 @@
/* If we're on MinGW, fall back to the system's float.h, which might have
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
+ *
+ * Also fall back on Darwin to allow additional definitions and
+ * implementation-defined values.
*/
-#if (defined(__MINGW32__) || defined(_MSC_VER)) && __STDC_HOSTED__ && \
- __has_include_next(<float.h>)
+#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
+ __STDC_HOSTED__ && __has_include_next(<float.h>)
# include_next <float.h>
/* Undefine anything that we'll be redefining below. */
diff --git a/lib/Headers/fxsrintrin.h b/lib/Headers/fxsrintrin.h
index ac6026aa5ba2..786081ca8eab 100644
--- a/lib/Headers/fxsrintrin.h
+++ b/lib/Headers/fxsrintrin.h
@@ -30,25 +30,75 @@
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("fxsr")))
+/// \brief Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
+/// memory region pointed to by the input parameter \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> FXSAVE </c> instruction.
+///
+/// \param __p
+/// A pointer to a 512-byte memory region. The beginning of this memory
+/// region should be aligned on a 16-byte boundary.
static __inline__ void __DEFAULT_FN_ATTRS
-_fxsave(void *__p) {
+_fxsave(void *__p)
+{
return __builtin_ia32_fxsave(__p);
}
+/// \brief Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
+/// memory region pointed to by the input parameter \a __p. The contents of
+/// this memory region should have been written to by a previous \c _fxsave
+/// or \c _fxsave64 intrinsic.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> FXRSTOR </c> instruction.
+///
+/// \param __p
+/// A pointer to a 512-byte memory region. The beginning of this memory
+/// region should be aligned on a 16-byte boundary.
static __inline__ void __DEFAULT_FN_ATTRS
-_fxsave64(void *__p) {
- return __builtin_ia32_fxsave64(__p);
+_fxrstor(void *__p)
+{
+ return __builtin_ia32_fxrstor(__p);
}
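A sketch of the save/restore pairing described above: the area must be 512 bytes and 16-byte aligned, and the restore should consume a region written by a matching save. Assumes a target compiled with the fxsr feature (e.g. -mfxsr), with the declarations pulled in via <immintrin.h>:

    #include <immintrin.h>

    void fxsr_roundtrip(void) {
      _Alignas(16) char area[512];
      _fxsave(area);  /* snapshot the x87/MMX/XMM/MXCSR state */
      /* ... code that may clobber floating-point state ... */
      _fxrstor(area); /* restore the snapshot */
    }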
+#ifdef __x86_64__
+/// \brief Saves the XMM, MMX, MXCSR and x87 FPU registers into a 512-byte
+/// memory region pointed to by the input parameter \a __p.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> FXSAVE64 </c> instruction.
+///
+/// \param __p
+/// A pointer to a 512-byte memory region. The beginning of this memory
+/// region should be aligned on a 16-byte boundary.
static __inline__ void __DEFAULT_FN_ATTRS
-_fxrstor(void *__p) {
- return __builtin_ia32_fxrstor(__p);
+_fxsave64(void *__p)
+{
+ return __builtin_ia32_fxsave64(__p);
}
+/// \brief Restores the XMM, MMX, MXCSR and x87 FPU registers from the 512-byte
+/// memory region pointed to by the input parameter \a __p. The contents of
+/// this memory region should have been written to by a previous \c _fxsave
+/// or \c _fxsave64 intrinsic.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> FXRSTOR64 </c> instruction.
+///
+/// \param __p
+/// A pointer to a 512-byte memory region. The beginning of this memory
+/// region should be aligned on a 16-byte boundary.
static __inline__ void __DEFAULT_FN_ATTRS
-_fxrstor64(void *__p) {
+_fxrstor64(void *__p)
+{
return __builtin_ia32_fxrstor64(__p);
}
+#endif
#undef __DEFAULT_FN_ATTRS
diff --git a/lib/Headers/ia32intrin.h b/lib/Headers/ia32intrin.h
index 397f3fd13e01..4928300103ad 100644
--- a/lib/Headers/ia32intrin.h
+++ b/lib/Headers/ia32intrin.h
@@ -60,12 +60,6 @@ __rdpmc(int __A) {
return __builtin_ia32_rdpmc(__A);
}
-/* __rdtsc */
-static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
-__rdtsc(void) {
- return __builtin_ia32_rdtsc();
-}
-
/* __rdtscp */
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
__rdtscp(unsigned int *__A) {
diff --git a/lib/Headers/immintrin.h b/lib/Headers/immintrin.h
index 4b2752353d6f..7f91d49fbcec 100644
--- a/lib/Headers/immintrin.h
+++ b/lib/Headers/immintrin.h
@@ -69,9 +69,44 @@
Intel documents these as being in immintrin.h, and
they depend on typedefs from avxintrin.h. */
+/// \brief Converts a 256-bit vector of [8 x float] into a 128-bit vector
+/// containing 16-bit half-precision float values.
+///
+/// \headerfile <x86intrin.h>
+///
+/// \code
+/// __m128i _mm256_cvtps_ph(__m256 a, const int imm);
+/// \endcode
+///
+/// This intrinsic corresponds to the <c> VCVTPS2PH </c> instruction.
+///
+/// \param a
+/// A 256-bit vector containing 32-bit single-precision float values to be
+/// converted to 16-bit half-precision float values.
+/// \param imm
+/// An immediate value controlling rounding using bits [2:0]: \n
+/// 000: Nearest \n
+/// 001: Down \n
+/// 010: Up \n
+/// 011: Truncate \n
+/// 1XX: Use MXCSR.RC for rounding
+/// \returns A 128-bit vector containing the converted 16-bit half-precision
+/// float values.
#define _mm256_cvtps_ph(a, imm) __extension__ ({ \
(__m128i)__builtin_ia32_vcvtps2ph256((__v8sf)(__m256)(a), (imm)); })
+/// \brief Converts a 128-bit vector containing 16-bit half-precision float
+/// values into a 256-bit vector of [8 x float].
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the <c> VCVTPH2PS </c> instruction.
+///
+/// \param __a
+/// A 128-bit vector containing 16-bit half-precision float values to be
+/// converted to 32-bit single-precision float values.
+/// \returns A vector of [8 x float] containing the converted 32-bit
+/// single-precision float values.
static __inline __m256 __attribute__((__always_inline__, __nodebug__, __target__("f16c")))
_mm256_cvtph_ps(__m128i __a)
{
diff --git a/lib/Headers/intrin.h b/lib/Headers/intrin.h
index f18711ad1ecf..7c91ebaee8cb 100644
--- a/lib/Headers/intrin.h
+++ b/lib/Headers/intrin.h
@@ -34,6 +34,10 @@
#include <x86intrin.h>
#endif
+#if defined(__arm__)
+#include <armintr.h>
+#endif
+
/* For the definition of jmp_buf. */
#if __STDC_HOSTED__
#include <setjmp.h>
@@ -62,7 +66,9 @@ void __cpuid(int[4], int);
static __inline__
void __cpuidex(int[4], int, int);
void __debugbreak(void);
+static __inline__
__int64 __emul(int, int);
+static __inline__
unsigned __int64 __emulu(unsigned int, unsigned int);
void __cdecl __fastfail(unsigned int);
unsigned int __getcallerseflags(void);
@@ -93,6 +99,7 @@ static __inline__
void __movsd(unsigned long *, unsigned long const *, size_t);
static __inline__
void __movsw(unsigned short *, unsigned short const *, size_t);
+static __inline__
void __nop(void);
void __nvreg_restore_fence(void);
void __nvreg_save_fence(void);
@@ -249,10 +256,12 @@ static __inline__
unsigned long __cdecl _lrotl(unsigned long, int);
static __inline__
unsigned long __cdecl _lrotr(unsigned long, int);
-static __inline__
-void _ReadBarrier(void);
-static __inline__
-void _ReadWriteBarrier(void);
+static __inline__ void
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadBarrier(void);
+static __inline__ void
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_ReadWriteBarrier(void);
static __inline__
void *_ReturnAddress(void);
unsigned int _rorx_u32(unsigned int, const unsigned int);
@@ -281,8 +290,9 @@ unsigned int _shrx_u32(unsigned int, unsigned int);
void _Store_HLERelease(long volatile *, long);
void _Store64_HLERelease(__int64 volatile *, __int64);
void _StorePointer_HLERelease(void *volatile *, void *);
-static __inline__
-void _WriteBarrier(void);
+static __inline__ void
+__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
+_WriteBarrier(void);
unsigned __int32 xbegin(void);
void _xend(void);
static __inline__
@@ -307,7 +317,6 @@ void __lwpval64(unsigned __int64, unsigned int, unsigned int);
unsigned __int64 __lzcnt64(unsigned __int64);
static __inline__
void __movsq(unsigned long long *, unsigned long long const *, size_t);
-__int64 __mulh(__int64, __int64);
static __inline__
unsigned __int64 __popcnt64(unsigned __int64);
static __inline__
@@ -378,30 +387,15 @@ void *_InterlockedCompareExchangePointer(void *volatile *_Destination,
void *_Exchange, void *_Comparand);
void *_InterlockedCompareExchangePointer_np(void *volatile *_Destination,
void *_Exchange, void *_Comparand);
-static __inline__
-__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
-static __inline__
-__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
-static __inline__
-__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
void *_InterlockedExchangePointer(void *volatile *_Target, void *_Value);
-static __inline__
-__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
long _InterlockedOr_np(long volatile *_Value, long _Mask);
short _InterlockedOr16_np(short volatile *_Value, short _Mask);
-static __inline__
-__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedOr64_np(__int64 volatile *_Value, __int64 _Mask);
char _InterlockedOr8_np(char volatile *_Value, char _Mask);
long _InterlockedXor_np(long volatile *_Value, long _Mask);
short _InterlockedXor16_np(short volatile *_Value, short _Mask);
-static __inline__
-__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
__int64 _InterlockedXor64_np(__int64 volatile *_Value, __int64 _Mask);
char _InterlockedXor8_np(char volatile *_Value, char _Mask);
-static __inline__
-__int64 _mul128(__int64 _Multiplier, __int64 _Multiplicand,
- __int64 *_HighProduct);
unsigned __int64 _rorx_u64(unsigned __int64, const unsigned int);
__int64 _sarx_i64(__int64, unsigned int);
#if __STDC_HOSTED__
@@ -409,119 +403,44 @@ int __cdecl _setjmpex(jmp_buf);
#endif
unsigned __int64 _shlx_u64(unsigned __int64, unsigned int);
unsigned __int64 _shrx_u64(unsigned __int64, unsigned int);
-/*
- * Multiply two 64-bit integers and obtain a 64-bit result.
- * The low-half is returned directly and the high half is in an out parameter.
- */
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-_umul128(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand,
- unsigned __int64 *_HighProduct) {
- unsigned __int128 _FullProduct =
- (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
- *_HighProduct = _FullProduct >> 64;
- return _FullProduct;
-}
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__umulh(unsigned __int64 _Multiplier, unsigned __int64 _Multiplicand) {
- unsigned __int128 _FullProduct =
- (unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
- return _FullProduct >> 64;
-}
+static __inline__
+__int64 __mulh(__int64, __int64);
+static __inline__
+unsigned __int64 __umulh(unsigned __int64, unsigned __int64);
+static __inline__
+__int64 _mul128(__int64, __int64, __int64*);
+static __inline__
+unsigned __int64 _umul128(unsigned __int64,
+ unsigned __int64,
+ unsigned __int64*);
#endif /* __x86_64__ */
-/*----------------------------------------------------------------------------*\
-|* Multiplication
-\*----------------------------------------------------------------------------*/
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-__emul(int __in1, int __in2) {
- return (__int64)__in1 * (__int64)__in2;
-}
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__emulu(unsigned int __in1, unsigned int __in2) {
- return (unsigned __int64)__in1 * (unsigned __int64)__in2;
-}
-/*----------------------------------------------------------------------------*\
-|* Bit Twiddling
-\*----------------------------------------------------------------------------*/
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_rotl8(unsigned char _Value, unsigned char _Shift) {
- _Shift &= 0x7;
- return _Shift ? (_Value << _Shift) | (_Value >> (8 - _Shift)) : _Value;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_rotr8(unsigned char _Value, unsigned char _Shift) {
- _Shift &= 0x7;
- return _Shift ? (_Value >> _Shift) | (_Value << (8 - _Shift)) : _Value;
-}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
-_rotl16(unsigned short _Value, unsigned char _Shift) {
- _Shift &= 0xf;
- return _Shift ? (_Value << _Shift) | (_Value >> (16 - _Shift)) : _Value;
-}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
-_rotr16(unsigned short _Value, unsigned char _Shift) {
- _Shift &= 0xf;
- return _Shift ? (_Value >> _Shift) | (_Value << (16 - _Shift)) : _Value;
-}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_rotl(unsigned int _Value, int _Shift) {
- _Shift &= 0x1f;
- return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
-}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_rotr(unsigned int _Value, int _Shift) {
- _Shift &= 0x1f;
- return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
-}
-static __inline__ unsigned long __DEFAULT_FN_ATTRS
-_lrotl(unsigned long _Value, int _Shift) {
- _Shift &= 0x1f;
- return _Shift ? (_Value << _Shift) | (_Value >> (32 - _Shift)) : _Value;
-}
-static __inline__ unsigned long __DEFAULT_FN_ATTRS
-_lrotr(unsigned long _Value, int _Shift) {
- _Shift &= 0x1f;
- return _Shift ? (_Value >> _Shift) | (_Value << (32 - _Shift)) : _Value;
-}
-static
-__inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-_rotl64(unsigned __int64 _Value, int _Shift) {
- _Shift &= 0x3f;
- return _Shift ? (_Value << _Shift) | (_Value >> (64 - _Shift)) : _Value;
-}
-static
-__inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-_rotr64(unsigned __int64 _Value, int _Shift) {
- _Shift &= 0x3f;
- return _Shift ? (_Value >> _Shift) | (_Value << (64 - _Shift)) : _Value;
-}
+#if defined(__x86_64__) || defined(__arm__)
+
+static __inline__
+__int64 _InterlockedDecrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedExchange64(__int64 volatile *_Target, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value);
+static __inline__
+__int64 _InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value);
+static __inline__
+__int64 _InterlockedIncrement64(__int64 volatile *_Addend);
+static __inline__
+__int64 _InterlockedOr64(__int64 volatile *_Value, __int64 _Mask);
+static __inline__
+__int64 _InterlockedXor64(__int64 volatile *_Value, __int64 _Mask);
+static __inline__
+__int64 _InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask);
+
+#endif
+
/*----------------------------------------------------------------------------*\
|* Bit Counting and Testing
\*----------------------------------------------------------------------------*/
static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_BitScanForward(unsigned long *_Index, unsigned long _Mask) {
- if (!_Mask)
- return 0;
- *_Index = __builtin_ctzl(_Mask);
- return 1;
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_BitScanReverse(unsigned long *_Index, unsigned long _Mask) {
- if (!_Mask)
- return 0;
- *_Index = 31 - __builtin_clzl(_Mask);
- return 1;
-}
-static __inline__ unsigned short __DEFAULT_FN_ATTRS
-__popcnt16(unsigned short _Value) {
- return __builtin_popcount((int)_Value);
-}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-__popcnt(unsigned int _Value) {
- return __builtin_popcount(_Value);
-}
-static __inline__ unsigned char __DEFAULT_FN_ATTRS
_bittest(long const *_BitBase, long _BitPos) {
return (*_BitBase >> _BitPos) & 1;
}
@@ -548,26 +467,24 @@ _interlockedbittestandset(long volatile *_BitBase, long _BitPos) {
long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_SEQ_CST);
return (_PrevVal >> _BitPos) & 1;
}
-#ifdef __x86_64__
+#if defined(__arm__) || defined(__aarch64__)
static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_BitScanForward64(unsigned long *_Index, unsigned __int64 _Mask) {
- if (!_Mask)
- return 0;
- *_Index = __builtin_ctzll(_Mask);
- return 1;
+_interlockedbittestandset_acq(long volatile *_BitBase, long _BitPos) {
+ long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_ACQUIRE);
+ return (_PrevVal >> _BitPos) & 1;
}
static __inline__ unsigned char __DEFAULT_FN_ATTRS
-_BitScanReverse64(unsigned long *_Index, unsigned __int64 _Mask) {
- if (!_Mask)
- return 0;
- *_Index = 63 - __builtin_clzll(_Mask);
- return 1;
+_interlockedbittestandset_nf(long volatile *_BitBase, long _BitPos) {
+ long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_RELAXED);
+ return (_PrevVal >> _BitPos) & 1;
}
-static __inline__
-unsigned __int64 __DEFAULT_FN_ATTRS
-__popcnt64(unsigned __int64 _Value) {
- return __builtin_popcountll(_Value);
+static __inline__ unsigned char __DEFAULT_FN_ATTRS
+_interlockedbittestandset_rel(long volatile *_BitBase, long _BitPos) {
+ long _PrevVal = __atomic_fetch_or(_BitBase, 1l << _BitPos, __ATOMIC_RELEASE);
+ return (_PrevVal >> _BitPos) & 1;
}
+#endif
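The suffix convention used by these new variants maps directly onto C11 memory orders: _acq is acquire, _rel is release, and _nf ("no fence") is relaxed, with the unsuffixed form remaining sequentially consistent. The acquire variant expressed in portable C11 atomics, for comparison:

    #include <stdatomic.h>

    /* Equivalent of _interlockedbittestandset_acq in plain C11. */
    unsigned char bts_acquire(_Atomic long *base, long pos) {
      long prev = atomic_fetch_or_explicit(base, 1L << pos,
                                           memory_order_acquire);
      return (unsigned char)((prev >> pos) & 1);
    }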
+#ifdef __x86_64__
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_bittest64(__int64 const *_BitBase, __int64 _BitPos) {
return (*_BitBase >> _BitPos) & 1;
@@ -600,196 +517,449 @@ _interlockedbittestandset64(__int64 volatile *_BitBase, __int64 _BitPos) {
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange Add
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd8(char volatile *_Addend, char _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+_InterlockedExchangeAdd8_acq(char volatile *_Addend, char _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
}
-static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd16(short volatile *_Addend, short _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
-}
-#ifdef __x86_64__
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeAdd64(__int64 volatile *_Addend, __int64 _Value) {
- return __atomic_fetch_add(_Addend, _Value, __ATOMIC_SEQ_CST);
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd8_nf(char volatile *_Addend, char _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
}
-#endif
-/*----------------------------------------------------------------------------*\
-|* Interlocked Exchange Sub
-\*----------------------------------------------------------------------------*/
static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub8(char volatile *_Subend, char _Value) {
- return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+_InterlockedExchangeAdd8_rel(char volatile *_Addend, char _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd16_acq(short volatile *_Addend, short _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd16_nf(short volatile *_Addend, short _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
}
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub16(short volatile *_Subend, short _Value) {
- return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+_InterlockedExchangeAdd16_rel(short volatile *_Addend, short _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
}
static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub(long volatile *_Subend, long _Value) {
- return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+_InterlockedExchangeAdd_acq(long volatile *_Addend, long _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd_nf(long volatile *_Addend, long _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd_rel(long volatile *_Addend, long _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
}
-#ifdef __x86_64__
static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchangeSub64(__int64 volatile *_Subend, __int64 _Value) {
- return __atomic_fetch_sub(_Subend, _Value, __ATOMIC_SEQ_CST);
+_InterlockedExchangeAdd64_acq(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_ACQUIRE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64_nf(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELAXED);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchangeAdd64_rel(__int64 volatile *_Addend, __int64 _Value) {
+ return __atomic_fetch_add(_Addend, _Value, __ATOMIC_RELEASE);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Increment
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedIncrement16(short volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+_InterlockedIncrement16_acq(short volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedIncrement16_nf(short volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedIncrement16_rel(short volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedIncrement_acq(long volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedIncrement_nf(long volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedIncrement_rel(long volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
}
-#ifdef __x86_64__
static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedIncrement64(__int64 volatile *_Value) {
- return __atomic_add_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+_InterlockedIncrement64_acq(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_ACQUIRE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64_nf(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_RELAXED);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedIncrement64_rel(__int64 volatile *_Value) {
+ return __atomic_add_fetch(_Value, 1, __ATOMIC_RELEASE);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Decrement
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedDecrement16(short volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+_InterlockedDecrement16_acq(short volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedDecrement16_nf(short volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedDecrement16_rel(short volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedDecrement_acq(long volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedDecrement_nf(long volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedDecrement_rel(long volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64_acq(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_ACQUIRE);
}
-#ifdef __x86_64__
static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedDecrement64(__int64 volatile *_Value) {
- return __atomic_sub_fetch(_Value, 1, __ATOMIC_SEQ_CST);
+_InterlockedDecrement64_nf(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELAXED);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedDecrement64_rel(__int64 volatile *_Value) {
+ return __atomic_sub_fetch(_Value, 1, __ATOMIC_RELEASE);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked And
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedAnd8(char volatile *_Value, char _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedAnd8_acq(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedAnd8_nf(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedAnd8_rel(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedAnd16_acq(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedAnd16_nf(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
}
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedAnd16(short volatile *_Value, short _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedAnd16_rel(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
}
static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedAnd(long volatile *_Value, long _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedAnd_acq(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedAnd_nf(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedAnd_rel(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64_acq(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_ACQUIRE);
}
-#ifdef __x86_64__
static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedAnd64(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_and(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedAnd64_nf(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedAnd64_rel(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_and(_Value, _Mask, __ATOMIC_RELEASE);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Or
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedOr8_acq(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedOr8_nf(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
+}
static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedOr8(char volatile *_Value, char _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedOr8_rel(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedOr16_acq(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedOr16_nf(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
}
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedOr16(short volatile *_Value, short _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedOr16_rel(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
}
static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedOr(long volatile *_Value, long _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedOr_acq(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedOr_nf(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedOr_rel(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64_acq(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedOr64_nf(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELAXED);
}
-#ifdef __x86_64__
static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedOr64(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_or(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedOr64_rel(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_or(_Value, _Mask, __ATOMIC_RELEASE);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Xor
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8_acq(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedXor8_nf(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
+}
static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedXor8(char volatile *_Value, char _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedXor8_rel(char volatile *_Value, char _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16_acq(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedXor16_nf(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
}
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedXor16(short volatile *_Value, short _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedXor16_rel(short volatile *_Value, short _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
}
static __inline__ long __DEFAULT_FN_ATTRS
-_InterlockedXor(long volatile *_Value, long _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedXor_acq(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor_nf(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedXor_rel(long volatile *_Value, long _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64_acq(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_ACQUIRE);
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedXor64_nf(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELAXED);
}
-#ifdef __x86_64__
static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedXor64(__int64 volatile *_Value, __int64 _Mask) {
- return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_SEQ_CST);
+_InterlockedXor64_rel(__int64 volatile *_Value, __int64 _Mask) {
+ return __atomic_fetch_xor(_Value, _Mask, __ATOMIC_RELEASE);
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Exchange
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedExchange8(char volatile *_Target, char _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+_InterlockedExchange8_acq(char volatile *_Target, char _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
+ return _Value;
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchange8_nf(char volatile *_Target, char _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
+ return _Value;
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedExchange8_rel(char volatile *_Target, char _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
return _Value;
}
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedExchange16(short volatile *_Target, short _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+_InterlockedExchange16_acq(short volatile *_Target, short _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
+ return _Value;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchange16_nf(short volatile *_Target, short _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
+ return _Value;
+}
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedExchange16_rel(short volatile *_Target, short _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
+ return _Value;
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchange_acq(long volatile *_Target, long _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
+ return _Value;
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchange_nf(long volatile *_Target, long _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
+ return _Value;
+}
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedExchange_rel(long volatile *_Target, long _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
+ return _Value;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64_acq(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_ACQUIRE);
+ return _Value;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedExchange64_nf(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELAXED);
return _Value;
}
-#ifdef __x86_64__
static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedExchange64(__int64 volatile *_Target, __int64 _Value) {
- __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_SEQ_CST);
+_InterlockedExchange64_rel(__int64 volatile *_Target, __int64 _Value) {
+ __atomic_exchange(_Target, &_Value, &_Value, __ATOMIC_RELEASE);
return _Value;
}
#endif
/*----------------------------------------------------------------------------*\
|* Interlocked Compare Exchange
\*----------------------------------------------------------------------------*/
+#if defined(__arm__) || defined(__aarch64__)
static __inline__ char __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange8(char volatile *_Destination,
+_InterlockedCompareExchange8_acq(char volatile *_Destination,
char _Exchange, char _Comparand) {
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ return _Comparand;
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange8_nf(char volatile *_Destination,
+ char _Exchange, char _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return _Comparand;
+}
+static __inline__ char __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange8_rel(char volatile *_Destination,
+ char _Exchange, char _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
return _Comparand;
}
static __inline__ short __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange16(short volatile *_Destination,
+_InterlockedCompareExchange16_acq(short volatile *_Destination,
short _Exchange, short _Comparand) {
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
return _Comparand;
}
-static __inline__ __int64 __DEFAULT_FN_ATTRS
-_InterlockedCompareExchange64(__int64 volatile *_Destination,
- __int64 _Exchange, __int64 _Comparand) {
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange16_nf(short volatile *_Destination,
+ short _Exchange, short _Comparand) {
__atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
- __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
return _Comparand;
}
-/*----------------------------------------------------------------------------*\
-|* Barriers
-\*----------------------------------------------------------------------------*/
-static __inline__ void __DEFAULT_FN_ATTRS
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadWriteBarrier(void) {
- __atomic_signal_fence(__ATOMIC_SEQ_CST);
+static __inline__ short __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange16_rel(short volatile *_Destination,
+ short _Exchange, short _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return _Comparand;
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_ReadBarrier(void) {
- __atomic_signal_fence(__ATOMIC_SEQ_CST);
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange_acq(long volatile *_Destination,
+ long _Exchange, long _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ return _Comparand;
}
-static __inline__ void __DEFAULT_FN_ATTRS
-__attribute__((__deprecated__("use other intrinsics or C++11 atomics instead")))
-_WriteBarrier(void) {
- __atomic_signal_fence(__ATOMIC_SEQ_CST);
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange_nf(long volatile *_Destination,
+ long _Exchange, long _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return _Comparand;
}
-#ifdef __x86_64__
-static __inline__ void __DEFAULT_FN_ATTRS
-__faststorefence(void) {
- __atomic_thread_fence(__ATOMIC_SEQ_CST);
+static __inline__ long __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange_rel(long volatile *_Destination,
+ long _Exchange, long _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64_acq(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
+ return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64_nf(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ return _Comparand;
+}
+static __inline__ __int64 __DEFAULT_FN_ATTRS
+_InterlockedCompareExchange64_rel(__int64 volatile *_Destination,
+ __int64 _Exchange, __int64 _Comparand) {
+ __atomic_compare_exchange(_Destination, &_Comparand, &_Exchange, 0,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+ return _Comparand;
}
#endif
/*----------------------------------------------------------------------------*\
@@ -840,59 +1010,39 @@ __readgsqword(unsigned long __offset) {
#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __DEFAULT_FN_ATTRS
__movsb(unsigned char *__dst, unsigned char const *__src, size_t __n) {
- __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n)
- : "%edi", "%esi", "%ecx");
+ __asm__("rep movsb" : : "D"(__dst), "S"(__src), "c"(__n));
}
static __inline__ void __DEFAULT_FN_ATTRS
__movsd(unsigned long *__dst, unsigned long const *__src, size_t __n) {
- __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n)
- : "%edi", "%esi", "%ecx");
+ __asm__("rep movsl" : : "D"(__dst), "S"(__src), "c"(__n));
}
static __inline__ void __DEFAULT_FN_ATTRS
__movsw(unsigned short *__dst, unsigned short const *__src, size_t __n) {
- __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n)
- : "%edi", "%esi", "%ecx");
-}
-static __inline__ void __DEFAULT_FN_ATTRS
-__stosb(unsigned char *__dst, unsigned char __x, size_t __n) {
- __asm__("rep stosb" : : "D"(__dst), "a"(__x), "c"(__n)
- : "%edi", "%ecx");
+ __asm__("rep movsw" : : "D"(__dst), "S"(__src), "c"(__n));
}
static __inline__ void __DEFAULT_FN_ATTRS
__stosd(unsigned long *__dst, unsigned long __x, size_t __n) {
- __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n)
- : "%edi", "%ecx");
+ __asm__("rep stosl" : : "D"(__dst), "a"(__x), "c"(__n));
}
static __inline__ void __DEFAULT_FN_ATTRS
__stosw(unsigned short *__dst, unsigned short __x, size_t __n) {
- __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n)
- : "%edi", "%ecx");
+ __asm__("rep stosw" : : "D"(__dst), "a"(__x), "c"(__n));
}
#endif
#ifdef __x86_64__
static __inline__ void __DEFAULT_FN_ATTRS
__movsq(unsigned long long *__dst, unsigned long long const *__src, size_t __n) {
- __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n)
- : "%edi", "%esi", "%ecx");
+ __asm__("rep movsq" : : "D"(__dst), "S"(__src), "c"(__n));
}
static __inline__ void __DEFAULT_FN_ATTRS
__stosq(unsigned __int64 *__dst, unsigned __int64 __x, size_t __n) {
- __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n)
- : "%edi", "%ecx");
+ __asm__("rep stosq" : : "D"(__dst), "a"(__x), "c"(__n));
}
#endif
/*----------------------------------------------------------------------------*\
|* Misc
\*----------------------------------------------------------------------------*/
-static __inline__ void * __DEFAULT_FN_ATTRS
-_AddressOfReturnAddress(void) {
- return (void*)((char*)__builtin_frame_address(0) + sizeof(void*));
-}
-static __inline__ void * __DEFAULT_FN_ATTRS
-_ReturnAddress(void) {
- return __builtin_return_address(0);
-}
#if defined(__i386__) || defined(__x86_64__)
static __inline__ void __DEFAULT_FN_ATTRS
__cpuid(int __info[4], int __level) {
@@ -914,6 +1064,10 @@ static __inline__ void __DEFAULT_FN_ATTRS
__halt(void) {
__asm__ volatile ("hlt");
}
+static __inline__ void __DEFAULT_FN_ATTRS
+__nop(void) {
+ __asm__ volatile ("nop");
+}
#endif
/*----------------------------------------------------------------------------*\
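A minimal usage sketch for the ARM acquire/release interlocked variants added above, assuming an MSVC-compatible ARM or AArch64 target where this header is in use; lock_acquire and lock_release are hypothetical helper names chosen only for illustration:

    #include <intrin.h>

    /* Spin until the flag changes 0 -> 1; the _acq variant gives the CAS
       acquire semantics, so later accesses cannot be hoisted above it. */
    static void lock_acquire(long volatile *lock) {
      while (_InterlockedCompareExchange_acq(lock, 1, 0) != 0)
        ;
    }

    /* Clear the flag with release semantics so earlier writes become
       visible before the lock is observed as free. */
    static void lock_release(long volatile *lock) {
      _InterlockedExchange_rel(lock, 0);
    }
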
diff --git a/lib/Headers/lzcntintrin.h b/lib/Headers/lzcntintrin.h
index 4c00e42ac3a9..3d2769da3bae 100644
--- a/lib/Headers/lzcntintrin.h
+++ b/lib/Headers/lzcntintrin.h
@@ -31,18 +31,48 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("lzcnt")))
+/// \brief Counts the number of leading zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+/// An unsigned 16-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 16-bit integer containing the number of leading zero
+/// bits in the operand.
static __inline__ unsigned short __DEFAULT_FN_ATTRS
__lzcnt16(unsigned short __X)
{
return __X ? __builtin_clzs(__X) : 16;
}
+/// \brief Counts the number of leading zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+/// An unsigned 32-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of leading zero
+/// bits in the operand.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
__lzcnt32(unsigned int __X)
{
return __X ? __builtin_clz(__X) : 32;
}
+/// \brief Counts the number of leading zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+/// An unsigned 32-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 32-bit integer containing the number of leading zero
+/// bits in the operand.
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_lzcnt_u32(unsigned int __X)
{
@@ -50,12 +80,32 @@ _lzcnt_u32(unsigned int __X)
}
#ifdef __x86_64__
+/// \brief Counts the number of leading zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of leading zero
+/// bits in the operand.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
__lzcnt64(unsigned long long __X)
{
return __X ? __builtin_clzll(__X) : 64;
}
+/// \brief Counts the number of leading zero bits in the operand.
+///
+/// \headerfile <x86intrin.h>
+///
+/// This intrinsic corresponds to the \c LZCNT instruction.
+///
+/// \param __X
+/// An unsigned 64-bit integer whose leading zeros are to be counted.
+/// \returns An unsigned 64-bit integer containing the number of leading zero
+/// bits in the operand.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS
_lzcnt_u64(unsigned long long __X)
{
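As a usage note, a small sketch of the intrinsics documented above, assuming the translation unit is built with -mlzcnt; floor_log2 is a hypothetical helper name:

    #include <x86intrin.h>

    /* Index of the highest set bit. __lzcnt32 returns 32 for a zero input
       (see the documentation above), so zero is handled separately. */
    static unsigned floor_log2(unsigned x) {
      return x ? 31 - __lzcnt32(x) : 0;
    }
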
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
index cefd6053aa80..e0c277a65a33 100644
--- a/lib/Headers/mmintrin.h
+++ b/lib/Headers/mmintrin.h
@@ -39,7 +39,7 @@ typedef char __v8qi __attribute__((__vector_size__(8)));
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c EMMS instruction.
+/// This intrinsic corresponds to the <c> EMMS </c> instruction.
///
static __inline__ void __DEFAULT_FN_ATTRS
_mm_empty(void)
@@ -52,7 +52,7 @@ _mm_empty(void)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
///
/// \param __i
/// A 32-bit integer value.
@@ -69,7 +69,7 @@ _mm_cvtsi32_si64(int __i)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVD / MOVD instruction.
+/// This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector.
@@ -85,7 +85,7 @@ _mm_cvtsi64_si32(__m64 __m)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVQ / MOVD instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVD </c> instruction.
///
/// \param __i
/// A 64-bit signed integer.
@@ -101,7 +101,7 @@ _mm_cvtsi64_m64(long long __i)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVQ / MOVD instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector.
@@ -121,7 +121,7 @@ _mm_cvtm64_si64(__m64 __m)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PACKSSWB instruction.
+/// This intrinsic corresponds to the <c> PACKSSWB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
@@ -151,7 +151,7 @@ _mm_packs_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PACKSSDW instruction.
+/// This intrinsic corresponds to the <c> PACKSSDW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [2 x i32]. Each 32-bit element is treated as a
@@ -181,7 +181,7 @@ _mm_packs_pi32(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PACKUSWB instruction.
+/// This intrinsic corresponds to the <c> PACKUSWB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16]. Each 16-bit element is treated as a
@@ -208,19 +208,19 @@ _mm_packs_pu16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PUNPCKHBW instruction.
+/// This intrinsic corresponds to the <c> PUNPCKHBW </c> instruction.
///
/// \param __m1
-/// A 64-bit integer vector of [8 x i8].
-/// Bits [39:32] are written to bits [7:0] of the result.
-/// Bits [47:40] are written to bits [23:16] of the result.
-/// Bits [55:48] are written to bits [39:32] of the result.
+/// A 64-bit integer vector of [8 x i8]. \n
+/// Bits [39:32] are written to bits [7:0] of the result. \n
+/// Bits [47:40] are written to bits [23:16] of the result. \n
+/// Bits [55:48] are written to bits [39:32] of the result. \n
/// Bits [63:56] are written to bits [55:48] of the result.
/// \param __m2
/// A 64-bit integer vector of [8 x i8].
-/// Bits [39:32] are written to bits [15:8] of the result.
-/// Bits [47:40] are written to bits [31:24] of the result.
-/// Bits [55:48] are written to bits [47:40] of the result.
+/// Bits [39:32] are written to bits [15:8] of the result. \n
+/// Bits [47:40] are written to bits [31:24] of the result. \n
+/// Bits [55:48] are written to bits [47:40] of the result. \n
/// Bits [63:56] are written to bits [63:56] of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
/// values.
@@ -235,15 +235,15 @@ _mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PUNPCKHWD instruction.
+/// This intrinsic corresponds to the <c> PUNPCKHWD </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
-/// Bits [47:32] are written to bits [15:0] of the result.
+/// Bits [47:32] are written to bits [15:0] of the result. \n
/// Bits [63:48] are written to bits [47:32] of the result.
/// \param __m2
/// A 64-bit integer vector of [4 x i16].
-/// Bits [47:32] are written to bits [31:16] of the result.
+/// Bits [47:32] are written to bits [31:16] of the result. \n
/// Bits [63:48] are written to bits [63:48] of the result.
/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
/// values.
@@ -258,7 +258,7 @@ _mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PUNPCKHDQ instruction.
+/// This intrinsic corresponds to the <c> PUNPCKHDQ </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [2 x i32]. The upper 32 bits are written to
@@ -279,19 +279,19 @@ _mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PUNPCKLBW instruction.
+/// This intrinsic corresponds to the <c> PUNPCKLBW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8].
-/// Bits [7:0] are written to bits [7:0] of the result.
-/// Bits [15:8] are written to bits [23:16] of the result.
-/// Bits [23:16] are written to bits [39:32] of the result.
+/// Bits [7:0] are written to bits [7:0] of the result. \n
+/// Bits [15:8] are written to bits [23:16] of the result. \n
+/// Bits [23:16] are written to bits [39:32] of the result. \n
/// Bits [31:24] are written to bits [55:48] of the result.
/// \param __m2
/// A 64-bit integer vector of [8 x i8].
-/// Bits [7:0] are written to bits [15:8] of the result.
-/// Bits [15:8] are written to bits [31:24] of the result.
-/// Bits [23:16] are written to bits [47:40] of the result.
+/// Bits [7:0] are written to bits [15:8] of the result. \n
+/// Bits [15:8] are written to bits [31:24] of the result. \n
+/// Bits [23:16] are written to bits [47:40] of the result. \n
/// Bits [31:24] are written to bits [63:56] of the result.
/// \returns A 64-bit integer vector of [8 x i8] containing the interleaved
/// values.
@@ -306,15 +306,15 @@ _mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PUNPCKLWD instruction.
+/// This intrinsic corresponds to the <c> PUNPCKLWD </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
-/// Bits [15:0] are written to bits [15:0] of the result.
+/// Bits [15:0] are written to bits [15:0] of the result. \n
/// Bits [31:16] are written to bits [47:32] of the result.
/// \param __m2
/// A 64-bit integer vector of [4 x i16].
-/// Bits [15:0] are written to bits [31:16] of the result.
+/// Bits [15:0] are written to bits [31:16] of the result. \n
/// Bits [31:16] are written to bits [63:48] of the result.
/// \returns A 64-bit integer vector of [4 x i16] containing the interleaved
/// values.
@@ -329,7 +329,7 @@ _mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PUNPCKLDQ instruction.
+/// This intrinsic corresponds to the <c> PUNPCKLDQ </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [2 x i32]. The lower 32 bits are written to
@@ -352,7 +352,7 @@ _mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PADDB instruction.
+/// This intrinsic corresponds to the <c> PADDB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8].
@@ -373,7 +373,7 @@ _mm_add_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PADDW instruction.
+/// This intrinsic corresponds to the <c> PADDW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -394,7 +394,7 @@ _mm_add_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PADDD instruction.
+/// This intrinsic corresponds to the <c> PADDD </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [2 x i32].
@@ -416,7 +416,7 @@ _mm_add_pi32(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PADDSB instruction.
+/// This intrinsic corresponds to the <c> PADDSB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8].
@@ -439,7 +439,7 @@ _mm_adds_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PADDSW instruction.
+/// This intrinsic corresponds to the <c> PADDSW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -461,7 +461,7 @@ _mm_adds_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PADDUSB instruction.
+/// This intrinsic corresponds to the <c> PADDUSB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8].
@@ -483,7 +483,7 @@ _mm_adds_pu8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PADDUSW instruction.
+/// This intrinsic corresponds to the <c> PADDUSW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -504,7 +504,7 @@ _mm_adds_pu16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBB instruction.
+/// This intrinsic corresponds to the <c> PSUBB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8] containing the minuends.
@@ -525,7 +525,7 @@ _mm_sub_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBW instruction.
+/// This intrinsic corresponds to the <c> PSUBW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16] containing the minuends.
@@ -546,7 +546,7 @@ _mm_sub_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBD instruction.
+/// This intrinsic corresponds to the <c> PSUBD </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [2 x i32] containing the minuends.
@@ -569,7 +569,7 @@ _mm_sub_pi32(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBSB instruction.
+/// This intrinsic corresponds to the <c> PSUBSB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8] containing the minuends.
@@ -592,7 +592,7 @@ _mm_subs_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBSW instruction.
+/// This intrinsic corresponds to the <c> PSUBSW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16] containing the minuends.
@@ -615,7 +615,7 @@ _mm_subs_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBUSB instruction.
+/// This intrinsic corresponds to the <c> PSUBUSB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8] containing the minuends.
@@ -638,7 +638,7 @@ _mm_subs_pu8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSUBUSW instruction.
+/// This intrinsic corresponds to the <c> PSUBUSW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16] containing the minuends.
@@ -663,7 +663,7 @@ _mm_subs_pu16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMADDWD instruction.
+/// This intrinsic corresponds to the <c> PMADDWD </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -684,7 +684,7 @@ _mm_madd_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMULHW instruction.
+/// This intrinsic corresponds to the <c> PMULHW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -705,7 +705,7 @@ _mm_mulhi_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMULLW instruction.
+/// This intrinsic corresponds to the <c> PMULLW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -727,14 +727,15 @@ _mm_mullo_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSLLW instruction.
+/// This intrinsic corresponds to the <c> PSLLW </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [4 x i16].
/// \param __count
/// A 64-bit integer vector interpreted as a single 64-bit integer.
/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted
-/// values. If __count is greater or equal to 16, the result is set to all 0.
+/// values. If \a __count is greater than or equal to 16, the result is set
+/// to all 0.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sll_pi16(__m64 __m, __m64 __count)
{
@@ -748,14 +749,15 @@ _mm_sll_pi16(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSLLW instruction.
+/// This intrinsic corresponds to the <c> PSLLW </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [4 x i16].
/// \param __count
/// A 32-bit integer value.
/// \returns A 64-bit integer vector of [4 x i16] containing the left-shifted
-/// values. If __count is greater or equal to 16, the result is set to all 0.
+/// values. If \a __count is greater than or equal to 16, the result is set
+/// to all 0.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_slli_pi16(__m64 __m, int __count)
{
@@ -770,14 +772,15 @@ _mm_slli_pi16(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSLLD instruction.
+/// This intrinsic corresponds to the <c> PSLLD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [2 x i32].
/// \param __count
/// A 64-bit integer vector interpreted as a single 64-bit integer.
/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted
-/// values. If __count is greater or equal to 32, the result is set to all 0.
+/// values. If \a __count is greater than or equal to 32, the result is set
+/// to all 0.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sll_pi32(__m64 __m, __m64 __count)
{
@@ -791,14 +794,15 @@ _mm_sll_pi32(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSLLD instruction.
+/// This intrinsic corresponds to the <c> PSLLD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [2 x i32].
/// \param __count
/// A 32-bit integer value.
/// \returns A 64-bit integer vector of [2 x i32] containing the left-shifted
-/// values. If __count is greater or equal to 32, the result is set to all 0.
+/// values. If \a __count is greater than or equal to 32, the result is set
+/// to all 0.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_slli_pi32(__m64 __m, int __count)
{
@@ -811,14 +815,14 @@ _mm_slli_pi32(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSLLQ instruction.
+/// This intrinsic corresponds to the <c> PSLLQ </c> instruction.
///
/// \param __m
/// A 64-bit integer vector interpreted as a single 64-bit integer.
/// \param __count
/// A 64-bit integer vector interpreted as a single 64-bit integer.
/// \returns A 64-bit integer vector containing the left-shifted value. If
-/// __count is greater or equal to 64, the result is set to 0.
+/// \a __count is greater than or equal to 64, the result is set to 0.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_sll_si64(__m64 __m, __m64 __count)
{
@@ -831,14 +835,14 @@ _mm_sll_si64(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSLLQ instruction.
+/// This intrinsic corresponds to the <c> PSLLQ </c> instruction.
///
/// \param __m
/// A 64-bit integer vector interpreted as a single 64-bit integer.
/// \param __count
/// A 32-bit integer value.
/// \returns A 64-bit integer vector containing the left-shifted value. If
-/// __count is greater or equal to 64, the result is set to 0.
+/// \a __count is greater than or equal to 64, the result is set to 0.
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_slli_si64(__m64 __m, int __count)
{
@@ -854,7 +858,7 @@ _mm_slli_si64(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRAW instruction.
+/// This intrinsic corresponds to the <c> PSRAW </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [4 x i16].
@@ -876,7 +880,7 @@ _mm_sra_pi16(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRAW instruction.
+/// This intrinsic corresponds to the <c> PSRAW </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [4 x i16].
@@ -899,7 +903,7 @@ _mm_srai_pi16(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRAD instruction.
+/// This intrinsic corresponds to the <c> PSRAD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [2 x i32].
@@ -921,7 +925,7 @@ _mm_sra_pi32(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRAD instruction.
+/// This intrinsic corresponds to the <c> PSRAD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [2 x i32].
@@ -943,7 +947,7 @@ _mm_srai_pi32(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRLW instruction.
+/// This intrinsic corresponds to the <c> PSRLW </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [4 x i16].
@@ -964,7 +968,7 @@ _mm_srl_pi16(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRLW instruction.
+/// This intrinsic corresponds to the <c> PSRLW </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [4 x i16].
@@ -986,7 +990,7 @@ _mm_srli_pi16(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRLD instruction.
+/// This intrinsic corresponds to the <c> PSRLD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [2 x i32].
@@ -1007,7 +1011,7 @@ _mm_srl_pi32(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRLD instruction.
+/// This intrinsic corresponds to the <c> PSRLD </c> instruction.
///
/// \param __m
/// A 64-bit integer vector of [2 x i32].
@@ -1027,7 +1031,7 @@ _mm_srli_pi32(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRLQ instruction.
+/// This intrinsic corresponds to the <c> PSRLQ </c> instruction.
///
/// \param __m
/// A 64-bit integer vector interpreted as a single 64-bit integer.
@@ -1046,7 +1050,7 @@ _mm_srl_si64(__m64 __m, __m64 __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSRLQ instruction.
+/// This intrinsic corresponds to the <c> PSRLQ </c> instruction.
///
/// \param __m
/// A 64-bit integer vector interpreted as a single 64-bit integer.
@@ -1063,7 +1067,7 @@ _mm_srli_si64(__m64 __m, int __count)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PAND instruction.
+/// This intrinsic corresponds to the <c> PAND </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector.
@@ -1083,7 +1087,7 @@ _mm_and_si64(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PANDN instruction.
+/// This intrinsic corresponds to the <c> PANDN </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector. The one's complement of this parameter is used
@@ -1102,7 +1106,7 @@ _mm_andnot_si64(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c POR instruction.
+/// This intrinsic corresponds to the <c> POR </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector.
@@ -1120,7 +1124,7 @@ _mm_or_si64(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PXOR instruction.
+/// This intrinsic corresponds to the <c> PXOR </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector.
@@ -1141,7 +1145,7 @@ _mm_xor_si64(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PCMPEQB instruction.
+/// This intrinsic corresponds to the <c> PCMPEQB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8].
@@ -1162,7 +1166,7 @@ _mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PCMPEQW instruction.
+/// This intrinsic corresponds to the <c> PCMPEQW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -1183,7 +1187,7 @@ _mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PCMPEQD instruction.
+/// This intrinsic corresponds to the <c> PCMPEQD </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [2 x i32].
@@ -1204,7 +1208,7 @@ _mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PCMPGTB instruction.
+/// This intrinsic corresponds to the <c> PCMPGTB </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [8 x i8].
@@ -1225,7 +1229,7 @@ _mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PCMPGTW instruction.
+/// This intrinsic corresponds to the <c> PCMPGTW </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [4 x i16].
@@ -1246,7 +1250,7 @@ _mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PCMPGTD instruction.
+/// This intrinsic corresponds to the <c> PCMPGTD </c> instruction.
///
/// \param __m1
/// A 64-bit integer vector of [2 x i32].
@@ -1264,7 +1268,7 @@ _mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the the \c VXORPS / XORPS instruction.
+/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
///
/// \returns An initialized 64-bit integer vector with all elements set to zero.
static __inline__ __m64 __DEFAULT_FN_ATTRS
@@ -1356,7 +1360,7 @@ _mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSHUFD / PSHUFD instruction.
+/// This intrinsic corresponds to the <c> VPSHUFD / PSHUFD </c> instruction.
///
/// \param __i
/// A 32-bit integer value used to initialize each vector element of the
@@ -1374,7 +1378,7 @@ _mm_set1_pi32(int __i)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPSHUFLW / PSHUFLW instruction.
+/// This intrinsic corresponds to the <c> VPSHUFLW / PSHUFLW </c> instruction.
///
/// \param __w
/// A 16-bit integer value used to initialize each vector element of the
@@ -1391,8 +1395,8 @@ _mm_set1_pi16(short __w)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPUNPCKLBW + VPSHUFLW / \c PUNPCKLBW +
-/// PSHUFLW instruction.
+/// This intrinsic corresponds to the <c> VPUNPCKLBW + VPSHUFLW / PUNPCKLBW +
+/// PSHUFLW </c> instruction.
///
/// \param __b
/// An 8-bit integer value used to initialize each vector element of the
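A brief sketch of the documented MMX intrinsics in use, assuming an x86 target with MMX enabled; add_pi16_example is a hypothetical name:

    #include <mmintrin.h>

    /* Adds the four 16-bit lanes of two vectors (PADDW) and clears the MMX
       state (EMMS) before any following x87 floating-point code runs. */
    static void add_pi16_example(const __m64 *a, const __m64 *b, __m64 *out) {
      *out = _mm_add_pi16(*a, *b);
      _mm_empty();
    }
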
diff --git a/lib/Headers/module.modulemap b/lib/Headers/module.modulemap
index 3e40d2c08d8c..11ef2f902945 100644
--- a/lib/Headers/module.modulemap
+++ b/lib/Headers/module.modulemap
@@ -63,11 +63,13 @@ module _Builtin_intrinsics [system] [extern_c] {
textual header "mwaitxintrin.h"
explicit module mm_malloc {
+ requires !freestanding
header "mm_malloc.h"
export * // note: for <stdlib.h> dependency
}
explicit module cpuid {
+ requires gnuinlineasm
header "cpuid.h"
}
diff --git a/lib/Headers/opencl-c.h b/lib/Headers/opencl-c.h
index 802927490e7f..0c25d312709d 100644
--- a/lib/Headers/opencl-c.h
+++ b/lib/Headers/opencl-c.h
@@ -17,6 +17,7 @@
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
#define __ovld __attribute__((overloadable))
+#define __conv __attribute__((convergent))
// Optimizations
#define __purefn __attribute__((pure))
@@ -9810,14 +9811,6 @@ float3 __ovld __cnfn native_cos(float3 x);
float4 __ovld __cnfn native_cos(float4 x);
float8 __ovld __cnfn native_cos(float8 x);
float16 __ovld __cnfn native_cos(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_cos(double x);
-double2 __ovld __cnfn native_cos(double2 x);
-double3 __ovld __cnfn native_cos(double3 x);
-double4 __ovld __cnfn native_cos(double4 x);
-double8 __ovld __cnfn native_cos(double8 x);
-double16 __ovld __cnfn native_cos(double16 x);
-#endif //cl_khr_fp64
/**
* Compute x / y over an implementation-defined range.
@@ -9829,14 +9822,6 @@ float3 __ovld __cnfn native_divide(float3 x, float3 y);
float4 __ovld __cnfn native_divide(float4 x, float4 y);
float8 __ovld __cnfn native_divide(float8 x, float8 y);
float16 __ovld __cnfn native_divide(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_divide(double x, double y);
-double2 __ovld __cnfn native_divide(double2 x, double2 y);
-double3 __ovld __cnfn native_divide(double3 x, double3 y);
-double4 __ovld __cnfn native_divide(double4 x, double4 y);
-double8 __ovld __cnfn native_divide(double8 x, double8 y);
-double16 __ovld __cnfn native_divide(double16 x, double16 y);
-#endif //cl_khr_fp64
/**
* Compute the base- e exponential of x over an
@@ -9849,14 +9834,6 @@ float3 __ovld __cnfn native_exp(float3 x);
float4 __ovld __cnfn native_exp(float4 x);
float8 __ovld __cnfn native_exp(float8 x);
float16 __ovld __cnfn native_exp(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_exp(double x);
-double2 __ovld __cnfn native_exp(double2 x);
-double3 __ovld __cnfn native_exp(double3 x);
-double4 __ovld __cnfn native_exp(double4 x);
-double8 __ovld __cnfn native_exp(double8 x);
-double16 __ovld __cnfn native_exp(double16 x);
-#endif //cl_khr_fp64
/**
* Compute the base- 2 exponential of x over an
@@ -9869,14 +9846,6 @@ float3 __ovld __cnfn native_exp2(float3 x);
float4 __ovld __cnfn native_exp2(float4 x);
float8 __ovld __cnfn native_exp2(float8 x);
float16 __ovld __cnfn native_exp2(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_exp2(double x);
-double2 __ovld __cnfn native_exp2(double2 x);
-double3 __ovld __cnfn native_exp2(double3 x);
-double4 __ovld __cnfn native_exp2(double4 x);
-double8 __ovld __cnfn native_exp2(double8 x);
-double16 __ovld __cnfn native_exp2(double16 x);
-#endif //cl_khr_fp64
/**
* Compute the base- 10 exponential of x over an
@@ -9889,14 +9858,6 @@ float3 __ovld __cnfn native_exp10(float3 x);
float4 __ovld __cnfn native_exp10(float4 x);
float8 __ovld __cnfn native_exp10(float8 x);
float16 __ovld __cnfn native_exp10(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_exp10(double x);
-double2 __ovld __cnfn native_exp10(double2 x);
-double3 __ovld __cnfn native_exp10(double3 x);
-double4 __ovld __cnfn native_exp10(double4 x);
-double8 __ovld __cnfn native_exp10(double8 x);
-double16 __ovld __cnfn native_exp10(double16 x);
-#endif //cl_khr_fp64
/**
* Compute natural logarithm over an implementationdefined
@@ -9909,14 +9870,6 @@ float3 __ovld __cnfn native_log(float3 x);
float4 __ovld __cnfn native_log(float4 x);
float8 __ovld __cnfn native_log(float8 x);
float16 __ovld __cnfn native_log(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_log(double x);
-double2 __ovld __cnfn native_log(double2 x);
-double3 __ovld __cnfn native_log(double3 x);
-double4 __ovld __cnfn native_log(double4 x);
-double8 __ovld __cnfn native_log(double8 x);
-double16 __ovld __cnfn native_log(double16 x);
-#endif //cl_khr_fp64
/**
 * Compute a base 2 logarithm over an implementation-defined
@@ -9928,14 +9881,6 @@ float3 __ovld __cnfn native_log2(float3 x);
float4 __ovld __cnfn native_log2(float4 x);
float8 __ovld __cnfn native_log2(float8 x);
float16 __ovld __cnfn native_log2(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_log2(double x);
-double2 __ovld __cnfn native_log2(double2 x);
-double3 __ovld __cnfn native_log2(double3 x);
-double4 __ovld __cnfn native_log2(double4 x);
-double8 __ovld __cnfn native_log2(double8 x);
-double16 __ovld __cnfn native_log2(double16 x);
-#endif //cl_khr_fp64
/**
 * Compute a base 10 logarithm over an implementation-defined
@@ -9947,14 +9892,6 @@ float3 __ovld __cnfn native_log10(float3 x);
float4 __ovld __cnfn native_log10(float4 x);
float8 __ovld __cnfn native_log10(float8 x);
float16 __ovld __cnfn native_log10(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_log10(double x);
-double2 __ovld __cnfn native_log10(double2 x);
-double3 __ovld __cnfn native_log10(double3 x);
-double4 __ovld __cnfn native_log10(double4 x);
-double8 __ovld __cnfn native_log10(double8 x);
-double16 __ovld __cnfn native_log10(double16 x);
-#endif //cl_khr_fp64
/**
* Compute x to the power y, where x is >= 0. The range of
@@ -9967,14 +9904,6 @@ float3 __ovld __cnfn native_powr(float3 x, float3 y);
float4 __ovld __cnfn native_powr(float4 x, float4 y);
float8 __ovld __cnfn native_powr(float8 x, float8 y);
float16 __ovld __cnfn native_powr(float16 x, float16 y);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_powr(double x, double y);
-double2 __ovld __cnfn native_powr(double2 x, double2 y);
-double3 __ovld __cnfn native_powr(double3 x, double3 y);
-double4 __ovld __cnfn native_powr(double4 x, double4 y);
-double8 __ovld __cnfn native_powr(double8 x, double8 y);
-double16 __ovld __cnfn native_powr(double16 x, double16 y);
-#endif //cl_khr_fp64
/**
* Compute reciprocal over an implementation-defined
@@ -9986,14 +9915,6 @@ float3 __ovld __cnfn native_recip(float3 x);
float4 __ovld __cnfn native_recip(float4 x);
float8 __ovld __cnfn native_recip(float8 x);
float16 __ovld __cnfn native_recip(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_recip(double x);
-double2 __ovld __cnfn native_recip(double2 x);
-double3 __ovld __cnfn native_recip(double3 x);
-double4 __ovld __cnfn native_recip(double4 x);
-double8 __ovld __cnfn native_recip(double8 x);
-double16 __ovld __cnfn native_recip(double16 x);
-#endif //cl_khr_fp64
/**
 * Compute inverse square root over an implementation-defined
@@ -10005,14 +9926,6 @@ float3 __ovld __cnfn native_rsqrt(float3 x);
float4 __ovld __cnfn native_rsqrt(float4 x);
float8 __ovld __cnfn native_rsqrt(float8 x);
float16 __ovld __cnfn native_rsqrt(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_rsqrt(double x);
-double2 __ovld __cnfn native_rsqrt(double2 x);
-double3 __ovld __cnfn native_rsqrt(double3 x);
-double4 __ovld __cnfn native_rsqrt(double4 x);
-double8 __ovld __cnfn native_rsqrt(double8 x);
-double16 __ovld __cnfn native_rsqrt(double16 x);
-#endif //cl_khr_fp64
/**
* Compute sine over an implementation-defined range.
@@ -10024,14 +9937,6 @@ float3 __ovld __cnfn native_sin(float3 x);
float4 __ovld __cnfn native_sin(float4 x);
float8 __ovld __cnfn native_sin(float8 x);
float16 __ovld __cnfn native_sin(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_sin(double x);
-double2 __ovld __cnfn native_sin(double2 x);
-double3 __ovld __cnfn native_sin(double3 x);
-double4 __ovld __cnfn native_sin(double4 x);
-double8 __ovld __cnfn native_sin(double8 x);
-double16 __ovld __cnfn native_sin(double16 x);
-#endif //cl_khr_fp64
/**
* Compute square root over an implementation-defined
@@ -10043,14 +9948,6 @@ float3 __ovld __cnfn native_sqrt(float3 x);
float4 __ovld __cnfn native_sqrt(float4 x);
float8 __ovld __cnfn native_sqrt(float8 x);
float16 __ovld __cnfn native_sqrt(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_sqrt(double x);
-double2 __ovld __cnfn native_sqrt(double2 x);
-double3 __ovld __cnfn native_sqrt(double3 x);
-double4 __ovld __cnfn native_sqrt(double4 x);
-double8 __ovld __cnfn native_sqrt(double8 x);
-double16 __ovld __cnfn native_sqrt(double16 x);
-#endif //cl_khr_fp64
/**
* Compute tangent over an implementation-defined range.
@@ -10062,14 +9959,6 @@ float3 __ovld __cnfn native_tan(float3 x);
float4 __ovld __cnfn native_tan(float4 x);
float8 __ovld __cnfn native_tan(float8 x);
float16 __ovld __cnfn native_tan(float16 x);
-#ifdef cl_khr_fp64
-double __ovld __cnfn native_tan(double x);
-double2 __ovld __cnfn native_tan(double2 x);
-double3 __ovld __cnfn native_tan(double3 x);
-double4 __ovld __cnfn native_tan(double4 x);
-double8 __ovld __cnfn native_tan(double8 x);
-double16 __ovld __cnfn native_tan(double16 x);
-#endif //cl_khr_fp64
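Usage sketch (editor's illustration, not part of the patch): after these removals the native_* math builtins exist only for single-precision types, so a kernel like the one below still compiles, while any double-typed call no longer resolves.

__kernel void fast_eval(__global float4 *out, __global const float4 *in) {
    size_t i = get_global_id(0);
    float4 x = in[i];
    /* reduced-precision evaluation over an implementation-defined range */
    out[i] = native_sin(x) * native_rsqrt(native_exp(x) + 1.0f);
}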
// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
@@ -13934,7 +13823,7 @@ typedef uint cl_mem_fence_flags;
* image objects and then want to read the updated data.
*/
-void __ovld barrier(cl_mem_fence_flags flags);
+void __ovld __conv barrier(cl_mem_fence_flags flags);
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
@@ -13947,8 +13836,8 @@ typedef enum memory_scope
memory_scope_sub_group
} memory_scope;
-void __ovld work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
-void __ovld work_group_barrier(cl_mem_fence_flags flags);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
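Usage sketch (editor's illustration, assuming standard OpenCL 2.0 semantics): marking the barriers __conv (convergent) tells the optimizer it must not duplicate them or sink them into divergent control flow; every work-item in the work-group still has to reach the call.

__kernel void tile_copy(__local float *tile, __global const float *src) {
    tile[get_local_id(0)] = src[get_global_id(0)];
    work_group_barrier(CLK_LOCAL_MEM_FENCE); /* all work-items synchronize here */
    /* ... tile may now be read by any work-item in the group ... */
}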
// OpenCL v1.1 s6.11.9, v1.2 s6.12.9 - Explicit Memory Fence Functions
@@ -14728,6 +14617,13 @@ int __ovld atom_xor(volatile __local int *p, int val);
unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
#endif
+#if defined(cl_khr_int64_extended_atomics)
+long __ovld atom_xor(volatile __global long *p, long val);
+unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
+long __ovld atom_xor(volatile __local long *p, long val);
+unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
+#endif
+
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : disable
#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : disable
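Usage sketch (editor's illustration): with cl_khr_int64_extended_atomics enabled, the 64-bit atom_xor overloads added above become visible and overload resolution picks the long variant.

#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
__kernel void toggle(volatile __global long *flags, long mask) {
    atom_xor(flags, mask); /* resolves to the new 64-bit overload */
}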
@@ -15564,9 +15460,11 @@ half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
#endif //cl_khr_fp16
+#if __OPENCL_C_VERSION__ >= CL_VERSION_1_2
// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
int printf(__constant const char* st, ...);
+#endif
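Usage sketch (editor's illustration): printf is now declared only for OpenCL C 1.2 and later, matching the guard above; the format string lives in the __constant address space.

__kernel void dump(__global const int *v) {
    uint i = (uint)get_global_id(0);
    printf("v[%u] = %d\n", i, v[i]);
}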
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
@@ -15592,6 +15490,10 @@ int printf(__constant const char* st, ...);
#define CLK_FILTER_NEAREST 0x10
#define CLK_FILTER_LINEAR 0x20
+#ifdef cl_khr_gl_msaa_sharing
+#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
+#endif //cl_khr_gl_msaa_sharing
+
/**
* Use the coordinate (coord.xy) to do an element lookup in
* the 2D image object specified by image.
@@ -16493,6 +16395,7 @@ int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_dept
#define CLK_sRGBA 0x10C1
#define CLK_sRGBx 0x10C0
#define CLK_sBGRA 0x10C2
+#define CLK_ABGR 0x10C3
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
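Usage sketch (editor's illustration): CLK_ABGR joins the possible return values of get_image_channel_order in OpenCL 2.0.

__kernel void probe(read_only image2d_t img, __global int *is_abgr) {
    *is_abgr = (get_image_channel_order(img) == CLK_ABGR);
}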
@@ -16670,101 +16573,101 @@ int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
// OpenCL v2.0 s6.13.15 - Work-group Functions
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-int __ovld work_group_all(int predicate);
-int __ovld work_group_any(int predicate);
+int __ovld __conv work_group_all(int predicate);
+int __ovld __conv work_group_any(int predicate);
#ifdef cl_khr_fp16
-half __ovld work_group_broadcast(half a, size_t local_id);
-half __ovld work_group_broadcast(half a, size_t x, size_t y);
-half __ovld work_group_broadcast(half a, size_t x, size_t y, size_t z);
+half __ovld __conv work_group_broadcast(half a, size_t local_id);
+half __ovld __conv work_group_broadcast(half a, size_t x, size_t y);
+half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z);
#endif
-int __ovld work_group_broadcast(int a, size_t local_id);
-int __ovld work_group_broadcast(int a, size_t x, size_t y);
-int __ovld work_group_broadcast(int a, size_t x, size_t y, size_t z);
-uint __ovld work_group_broadcast(uint a, size_t local_id);
-uint __ovld work_group_broadcast(uint a, size_t x, size_t y);
-uint __ovld work_group_broadcast(uint a, size_t x, size_t y, size_t z);
-long __ovld work_group_broadcast(long a, size_t local_id);
-long __ovld work_group_broadcast(long a, size_t x, size_t y);
-long __ovld work_group_broadcast(long a, size_t x, size_t y, size_t z);
-ulong __ovld work_group_broadcast(ulong a, size_t local_id);
-ulong __ovld work_group_broadcast(ulong a, size_t x, size_t y);
-ulong __ovld work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
-float __ovld work_group_broadcast(float a, size_t local_id);
-float __ovld work_group_broadcast(float a, size_t x, size_t y);
-float __ovld work_group_broadcast(float a, size_t x, size_t y, size_t z);
+int __ovld __conv work_group_broadcast(int a, size_t local_id);
+int __ovld __conv work_group_broadcast(int a, size_t x, size_t y);
+int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z);
+uint __ovld __conv work_group_broadcast(uint a, size_t local_id);
+uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y);
+uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z);
+long __ovld __conv work_group_broadcast(long a, size_t local_id);
+long __ovld __conv work_group_broadcast(long a, size_t x, size_t y);
+long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z);
+ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id);
+ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y);
+ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
+float __ovld __conv work_group_broadcast(float a, size_t local_id);
+float __ovld __conv work_group_broadcast(float a, size_t x, size_t y);
+float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z);
#ifdef cl_khr_fp64
-double __ovld work_group_broadcast(double a, size_t local_id);
-double __ovld work_group_broadcast(double a, size_t x, size_t y);
-double __ovld work_group_broadcast(double a, size_t x, size_t y, size_t z);
+double __ovld __conv work_group_broadcast(double a, size_t local_id);
+double __ovld __conv work_group_broadcast(double a, size_t x, size_t y);
+double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld work_group_reduce_add(half x);
-half __ovld work_group_reduce_min(half x);
-half __ovld work_group_reduce_max(half x);
-half __ovld work_group_scan_exclusive_add(half x);
-half __ovld work_group_scan_exclusive_min(half x);
-half __ovld work_group_scan_exclusive_max(half x);
-half __ovld work_group_scan_inclusive_add(half x);
-half __ovld work_group_scan_inclusive_min(half x);
-half __ovld work_group_scan_inclusive_max(half x);
+half __ovld __conv work_group_reduce_add(half x);
+half __ovld __conv work_group_reduce_min(half x);
+half __ovld __conv work_group_reduce_max(half x);
+half __ovld __conv work_group_scan_exclusive_add(half x);
+half __ovld __conv work_group_scan_exclusive_min(half x);
+half __ovld __conv work_group_scan_exclusive_max(half x);
+half __ovld __conv work_group_scan_inclusive_add(half x);
+half __ovld __conv work_group_scan_inclusive_min(half x);
+half __ovld __conv work_group_scan_inclusive_max(half x);
#endif
-int __ovld work_group_reduce_add(int x);
-int __ovld work_group_reduce_min(int x);
-int __ovld work_group_reduce_max(int x);
-int __ovld work_group_scan_exclusive_add(int x);
-int __ovld work_group_scan_exclusive_min(int x);
-int __ovld work_group_scan_exclusive_max(int x);
-int __ovld work_group_scan_inclusive_add(int x);
-int __ovld work_group_scan_inclusive_min(int x);
-int __ovld work_group_scan_inclusive_max(int x);
-uint __ovld work_group_reduce_add(uint x);
-uint __ovld work_group_reduce_min(uint x);
-uint __ovld work_group_reduce_max(uint x);
-uint __ovld work_group_scan_exclusive_add(uint x);
-uint __ovld work_group_scan_exclusive_min(uint x);
-uint __ovld work_group_scan_exclusive_max(uint x);
-uint __ovld work_group_scan_inclusive_add(uint x);
-uint __ovld work_group_scan_inclusive_min(uint x);
-uint __ovld work_group_scan_inclusive_max(uint x);
-long __ovld work_group_reduce_add(long x);
-long __ovld work_group_reduce_min(long x);
-long __ovld work_group_reduce_max(long x);
-long __ovld work_group_scan_exclusive_add(long x);
-long __ovld work_group_scan_exclusive_min(long x);
-long __ovld work_group_scan_exclusive_max(long x);
-long __ovld work_group_scan_inclusive_add(long x);
-long __ovld work_group_scan_inclusive_min(long x);
-long __ovld work_group_scan_inclusive_max(long x);
-ulong __ovld work_group_reduce_add(ulong x);
-ulong __ovld work_group_reduce_min(ulong x);
-ulong __ovld work_group_reduce_max(ulong x);
-ulong __ovld work_group_scan_exclusive_add(ulong x);
-ulong __ovld work_group_scan_exclusive_min(ulong x);
-ulong __ovld work_group_scan_exclusive_max(ulong x);
-ulong __ovld work_group_scan_inclusive_add(ulong x);
-ulong __ovld work_group_scan_inclusive_min(ulong x);
-ulong __ovld work_group_scan_inclusive_max(ulong x);
-float __ovld work_group_reduce_add(float x);
-float __ovld work_group_reduce_min(float x);
-float __ovld work_group_reduce_max(float x);
-float __ovld work_group_scan_exclusive_add(float x);
-float __ovld work_group_scan_exclusive_min(float x);
-float __ovld work_group_scan_exclusive_max(float x);
-float __ovld work_group_scan_inclusive_add(float x);
-float __ovld work_group_scan_inclusive_min(float x);
-float __ovld work_group_scan_inclusive_max(float x);
+int __ovld __conv work_group_reduce_add(int x);
+int __ovld __conv work_group_reduce_min(int x);
+int __ovld __conv work_group_reduce_max(int x);
+int __ovld __conv work_group_scan_exclusive_add(int x);
+int __ovld __conv work_group_scan_exclusive_min(int x);
+int __ovld __conv work_group_scan_exclusive_max(int x);
+int __ovld __conv work_group_scan_inclusive_add(int x);
+int __ovld __conv work_group_scan_inclusive_min(int x);
+int __ovld __conv work_group_scan_inclusive_max(int x);
+uint __ovld __conv work_group_reduce_add(uint x);
+uint __ovld __conv work_group_reduce_min(uint x);
+uint __ovld __conv work_group_reduce_max(uint x);
+uint __ovld __conv work_group_scan_exclusive_add(uint x);
+uint __ovld __conv work_group_scan_exclusive_min(uint x);
+uint __ovld __conv work_group_scan_exclusive_max(uint x);
+uint __ovld __conv work_group_scan_inclusive_add(uint x);
+uint __ovld __conv work_group_scan_inclusive_min(uint x);
+uint __ovld __conv work_group_scan_inclusive_max(uint x);
+long __ovld __conv work_group_reduce_add(long x);
+long __ovld __conv work_group_reduce_min(long x);
+long __ovld __conv work_group_reduce_max(long x);
+long __ovld __conv work_group_scan_exclusive_add(long x);
+long __ovld __conv work_group_scan_exclusive_min(long x);
+long __ovld __conv work_group_scan_exclusive_max(long x);
+long __ovld __conv work_group_scan_inclusive_add(long x);
+long __ovld __conv work_group_scan_inclusive_min(long x);
+long __ovld __conv work_group_scan_inclusive_max(long x);
+ulong __ovld __conv work_group_reduce_add(ulong x);
+ulong __ovld __conv work_group_reduce_min(ulong x);
+ulong __ovld __conv work_group_reduce_max(ulong x);
+ulong __ovld __conv work_group_scan_exclusive_add(ulong x);
+ulong __ovld __conv work_group_scan_exclusive_min(ulong x);
+ulong __ovld __conv work_group_scan_exclusive_max(ulong x);
+ulong __ovld __conv work_group_scan_inclusive_add(ulong x);
+ulong __ovld __conv work_group_scan_inclusive_min(ulong x);
+ulong __ovld __conv work_group_scan_inclusive_max(ulong x);
+float __ovld __conv work_group_reduce_add(float x);
+float __ovld __conv work_group_reduce_min(float x);
+float __ovld __conv work_group_reduce_max(float x);
+float __ovld __conv work_group_scan_exclusive_add(float x);
+float __ovld __conv work_group_scan_exclusive_min(float x);
+float __ovld __conv work_group_scan_exclusive_max(float x);
+float __ovld __conv work_group_scan_inclusive_add(float x);
+float __ovld __conv work_group_scan_inclusive_min(float x);
+float __ovld __conv work_group_scan_inclusive_max(float x);
#ifdef cl_khr_fp64
-double __ovld work_group_reduce_add(double x);
-double __ovld work_group_reduce_min(double x);
-double __ovld work_group_reduce_max(double x);
-double __ovld work_group_scan_exclusive_add(double x);
-double __ovld work_group_scan_exclusive_min(double x);
-double __ovld work_group_scan_exclusive_max(double x);
-double __ovld work_group_scan_inclusive_add(double x);
-double __ovld work_group_scan_inclusive_min(double x);
-double __ovld work_group_scan_inclusive_max(double x);
+double __ovld __conv work_group_reduce_add(double x);
+double __ovld __conv work_group_reduce_min(double x);
+double __ovld __conv work_group_reduce_max(double x);
+double __ovld __conv work_group_scan_exclusive_add(double x);
+double __ovld __conv work_group_scan_exclusive_min(double x);
+double __ovld __conv work_group_scan_exclusive_max(double x);
+double __ovld __conv work_group_scan_inclusive_add(double x);
+double __ovld __conv work_group_scan_inclusive_min(double x);
+double __ovld __conv work_group_scan_inclusive_max(double x);
#endif //cl_khr_fp64
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
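Usage sketch (editor's illustration): the work-group collectives above must be executed by every work-item in the group; __conv makes that convergence requirement visible to the optimizer.

__kernel void normalize(__global float *data) {
    size_t i = get_global_id(0);
    float x = data[i];
    float total = work_group_reduce_add(x);       /* same result in all work-items */
    float run = work_group_scan_inclusive_add(x); /* per-work-item running sum */
    data[i] = run / total;
}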
@@ -16840,11 +16743,11 @@ void __ovld retain_event(clk_event_t);
void __ovld release_event(clk_event_t);
-clk_event_t create_user_event(void);
+clk_event_t __ovld create_user_event(void);
void __ovld set_user_event_status(clk_event_t e, int state);
-bool is_valid_event (clk_event_t event);
+bool __ovld is_valid_event (clk_event_t event);
void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
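Usage sketch (editor's illustration): with the missing __ovld attributes fixed above, the event builtins can be used uniformly from device-side enqueue code.

void finish_event(clk_event_t evt) {
    if (is_valid_event(evt))
        set_user_event_status(evt, CL_COMPLETE);
    release_event(evt);
}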
@@ -16864,96 +16767,286 @@ uint __ovld get_enqueued_num_sub_groups(void);
uint __ovld get_sub_group_id(void);
uint __ovld get_sub_group_local_id(void);
-void __ovld sub_group_barrier(cl_mem_fence_flags flags);
+void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
#if __OPENCL_C_VERSION__ >= CL_VERSION_2_0
-void __ovld sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
#endif //__OPENCL_C_VERSION__ >= CL_VERSION_2_0
-int __ovld sub_group_all(int predicate);
-int __ovld sub_group_any(int predicate);
-
-int __ovld sub_group_broadcast(int x, uint sub_group_local_id);
-uint __ovld sub_group_broadcast(uint x, uint sub_group_local_id);
-long __ovld sub_group_broadcast(long x, uint sub_group_local_id);
-ulong __ovld sub_group_broadcast(ulong x, uint sub_group_local_id);
-float __ovld sub_group_broadcast(float x, uint sub_group_local_id);
-
-int __ovld sub_group_reduce_add(int x);
-uint __ovld sub_group_reduce_add(uint x);
-long __ovld sub_group_reduce_add(long x);
-ulong __ovld sub_group_reduce_add(ulong x);
-float __ovld sub_group_reduce_add(float x);
-int __ovld sub_group_reduce_min(int x);
-uint __ovld sub_group_reduce_min(uint x);
-long __ovld sub_group_reduce_min(long x);
-ulong __ovld sub_group_reduce_min(ulong x);
-float __ovld sub_group_reduce_min(float x);
-int __ovld sub_group_reduce_max(int x);
-uint __ovld sub_group_reduce_max(uint x);
-long __ovld sub_group_reduce_max(long x);
-ulong __ovld sub_group_reduce_max(ulong x);
-float __ovld sub_group_reduce_max(float x);
-
-int __ovld sub_group_scan_exclusive_add(int x);
-uint __ovld sub_group_scan_exclusive_add(uint x);
-long __ovld sub_group_scan_exclusive_add(long x);
-ulong __ovld sub_group_scan_exclusive_add(ulong x);
-float __ovld sub_group_scan_exclusive_add(float x);
-int __ovld sub_group_scan_exclusive_min(int x);
-uint __ovld sub_group_scan_exclusive_min(uint x);
-long __ovld sub_group_scan_exclusive_min(long x);
-ulong __ovld sub_group_scan_exclusive_min(ulong x);
-float __ovld sub_group_scan_exclusive_min(float x);
-int __ovld sub_group_scan_exclusive_max(int x);
-uint __ovld sub_group_scan_exclusive_max(uint x);
-long __ovld sub_group_scan_exclusive_max(long x);
-ulong __ovld sub_group_scan_exclusive_max(ulong x);
-float __ovld sub_group_scan_exclusive_max(float x);
-
-int __ovld sub_group_scan_inclusive_add(int x);
-uint __ovld sub_group_scan_inclusive_add(uint x);
-long __ovld sub_group_scan_inclusive_add(long x);
-ulong __ovld sub_group_scan_inclusive_add(ulong x);
-float __ovld sub_group_scan_inclusive_add(float x);
-int __ovld sub_group_scan_inclusive_min(int x);
-uint __ovld sub_group_scan_inclusive_min(uint x);
-long __ovld sub_group_scan_inclusive_min(long x);
-ulong __ovld sub_group_scan_inclusive_min(ulong x);
-float __ovld sub_group_scan_inclusive_min(float x);
-int __ovld sub_group_scan_inclusive_max(int x);
-uint __ovld sub_group_scan_inclusive_max(uint x);
-long __ovld sub_group_scan_inclusive_max(long x);
-ulong __ovld sub_group_scan_inclusive_max(ulong x);
-float __ovld sub_group_scan_inclusive_max(float x);
+int __ovld __conv sub_group_all(int predicate);
+int __ovld __conv sub_group_any(int predicate);
+
+int __ovld __conv sub_group_broadcast(int x, uint sub_group_local_id);
+uint __ovld __conv sub_group_broadcast(uint x, uint sub_group_local_id);
+long __ovld __conv sub_group_broadcast(long x, uint sub_group_local_id);
+ulong __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id);
+float __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
+
+int __ovld __conv sub_group_reduce_add(int x);
+uint __ovld __conv sub_group_reduce_add(uint x);
+long __ovld __conv sub_group_reduce_add(long x);
+ulong __ovld __conv sub_group_reduce_add(ulong x);
+float __ovld __conv sub_group_reduce_add(float x);
+int __ovld __conv sub_group_reduce_min(int x);
+uint __ovld __conv sub_group_reduce_min(uint x);
+long __ovld __conv sub_group_reduce_min(long x);
+ulong __ovld __conv sub_group_reduce_min(ulong x);
+float __ovld __conv sub_group_reduce_min(float x);
+int __ovld __conv sub_group_reduce_max(int x);
+uint __ovld __conv sub_group_reduce_max(uint x);
+long __ovld __conv sub_group_reduce_max(long x);
+ulong __ovld __conv sub_group_reduce_max(ulong x);
+float __ovld __conv sub_group_reduce_max(float x);
+
+int __ovld __conv sub_group_scan_exclusive_add(int x);
+uint __ovld __conv sub_group_scan_exclusive_add(uint x);
+long __ovld __conv sub_group_scan_exclusive_add(long x);
+ulong __ovld __conv sub_group_scan_exclusive_add(ulong x);
+float __ovld __conv sub_group_scan_exclusive_add(float x);
+int __ovld __conv sub_group_scan_exclusive_min(int x);
+uint __ovld __conv sub_group_scan_exclusive_min(uint x);
+long __ovld __conv sub_group_scan_exclusive_min(long x);
+ulong __ovld __conv sub_group_scan_exclusive_min(ulong x);
+float __ovld __conv sub_group_scan_exclusive_min(float x);
+int __ovld __conv sub_group_scan_exclusive_max(int x);
+uint __ovld __conv sub_group_scan_exclusive_max(uint x);
+long __ovld __conv sub_group_scan_exclusive_max(long x);
+ulong __ovld __conv sub_group_scan_exclusive_max(ulong x);
+float __ovld __conv sub_group_scan_exclusive_max(float x);
+
+int __ovld __conv sub_group_scan_inclusive_add(int x);
+uint __ovld __conv sub_group_scan_inclusive_add(uint x);
+long __ovld __conv sub_group_scan_inclusive_add(long x);
+ulong __ovld __conv sub_group_scan_inclusive_add(ulong x);
+float __ovld __conv sub_group_scan_inclusive_add(float x);
+int __ovld __conv sub_group_scan_inclusive_min(int x);
+uint __ovld __conv sub_group_scan_inclusive_min(uint x);
+long __ovld __conv sub_group_scan_inclusive_min(long x);
+ulong __ovld __conv sub_group_scan_inclusive_min(ulong x);
+float __ovld __conv sub_group_scan_inclusive_min(float x);
+int __ovld __conv sub_group_scan_inclusive_max(int x);
+uint __ovld __conv sub_group_scan_inclusive_max(uint x);
+long __ovld __conv sub_group_scan_inclusive_max(long x);
+ulong __ovld __conv sub_group_scan_inclusive_max(ulong x);
+float __ovld __conv sub_group_scan_inclusive_max(float x);
#ifdef cl_khr_fp16
-half __ovld sub_group_broadcast(half x, uint sub_group_local_id);
-half __ovld sub_group_reduce_add(half x);
-half __ovld sub_group_reduce_min(half x);
-half __ovld sub_group_reduce_max(half x);
-half __ovld sub_group_scan_exclusive_add(half x);
-half __ovld sub_group_scan_exclusive_min(half x);
-half __ovld sub_group_scan_exclusive_max(half x);
-half __ovld sub_group_scan_inclusive_add(half x);
-half __ovld sub_group_scan_inclusive_min(half x);
-half __ovld sub_group_scan_inclusive_max(half x);
+half __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id);
+half __ovld __conv sub_group_reduce_add(half x);
+half __ovld __conv sub_group_reduce_min(half x);
+half __ovld __conv sub_group_reduce_max(half x);
+half __ovld __conv sub_group_scan_exclusive_add(half x);
+half __ovld __conv sub_group_scan_exclusive_min(half x);
+half __ovld __conv sub_group_scan_exclusive_max(half x);
+half __ovld __conv sub_group_scan_inclusive_add(half x);
+half __ovld __conv sub_group_scan_inclusive_min(half x);
+half __ovld __conv sub_group_scan_inclusive_max(half x);
#endif //cl_khr_fp16
#ifdef cl_khr_fp64
-double __ovld sub_group_broadcast(double x, uint sub_group_local_id);
-double __ovld sub_group_reduce_add(double x);
-double __ovld sub_group_reduce_min(double x);
-double __ovld sub_group_reduce_max(double x);
-double __ovld sub_group_scan_exclusive_add(double x);
-double __ovld sub_group_scan_exclusive_min(double x);
-double __ovld sub_group_scan_exclusive_max(double x);
-double __ovld sub_group_scan_inclusive_add(double x);
-double __ovld sub_group_scan_inclusive_min(double x);
-double __ovld sub_group_scan_inclusive_max(double x);
+double __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
+double __ovld __conv sub_group_reduce_add(double x);
+double __ovld __conv sub_group_reduce_min(double x);
+double __ovld __conv sub_group_reduce_max(double x);
+double __ovld __conv sub_group_scan_exclusive_add(double x);
+double __ovld __conv sub_group_scan_exclusive_min(double x);
+double __ovld __conv sub_group_scan_exclusive_max(double x);
+double __ovld __conv sub_group_scan_inclusive_add(double x);
+double __ovld __conv sub_group_scan_inclusive_min(double x);
+double __ovld __conv sub_group_scan_inclusive_max(double x);
#endif //cl_khr_fp64
#endif //cl_khr_subgroups cl_intel_subgroups
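Usage sketch (editor's illustration): sub-group collectives are likewise convergent; the call must be reached by the whole sub-group.

__kernel void subgroup_sums(__global int *out, __global const int *in) {
    int s = sub_group_reduce_add(in[get_global_id(0)]);
    if (get_sub_group_local_id() == 0)
        out[get_group_id(0) * get_num_sub_groups() + get_sub_group_id()] = s;
}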
+#ifdef cl_amd_media_ops
+uint __ovld amd_bitalign(uint a, uint b, uint c);
+uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_bytealign(uint a, uint b, uint c);
+uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_lerp(uint a, uint b, uint c);
+uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_pack(float4 v);
+
+uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
+
+uint __ovld amd_sadhi(uint a, uint b, uint c);
+uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
+
+uint __ovld amd_sad(uint a, uint b, uint c);
+uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
+uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
+uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
+uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
+uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
+
+float __ovld amd_unpack0(uint a);
+float2 __ovld amd_unpack0(uint2 a);
+float3 __ovld amd_unpack0(uint3 a);
+float4 __ovld amd_unpack0(uint4 a);
+float8 __ovld amd_unpack0(uint8 a);
+float16 __ovld amd_unpack0(uint16 a);
+
+float __ovld amd_unpack1(uint a);
+float2 __ovld amd_unpack1(uint2 a);
+float3 __ovld amd_unpack1(uint3 a);
+float4 __ovld amd_unpack1(uint4 a);
+float8 __ovld amd_unpack1(uint8 a);
+float16 __ovld amd_unpack1(uint16 a);
+
+float __ovld amd_unpack2(uint a);
+float2 __ovld amd_unpack2(uint2 a);
+float3 __ovld amd_unpack2(uint3 a);
+float4 __ovld amd_unpack2(uint4 a);
+float8 __ovld amd_unpack2(uint8 a);
+float16 __ovld amd_unpack2(uint16 a);
+
+float __ovld amd_unpack3(uint a);
+float2 __ovld amd_unpack3(uint2 a);
+float3 __ovld amd_unpack3(uint3 a);
+float4 __ovld amd_unpack3(uint4 a);
+float8 __ovld amd_unpack3(uint8 a);
+float16 __ovld amd_unpack3(uint16 a);
+#endif // cl_amd_media_ops
+
+#ifdef cl_amd_media_ops2
+int __ovld amd_bfe(int src0, uint src1, uint src2);
+int2 __ovld amd_bfe(int2 src0, uint2 src1, uint2 src2);
+int3 __ovld amd_bfe(int3 src0, uint3 src1, uint3 src2);
+int4 __ovld amd_bfe(int4 src0, uint4 src1, uint4 src2);
+int8 __ovld amd_bfe(int8 src0, uint8 src1, uint8 src2);
+int16 __ovld amd_bfe(int16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfe(uint src0, uint src1, uint src2);
+uint2 __ovld amd_bfe(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_bfe(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_bfe(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_bfe(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_bfe(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_bfm(uint src0, uint src1);
+uint2 __ovld amd_bfm(uint2 src0, uint2 src1);
+uint3 __ovld amd_bfm(uint3 src0, uint3 src1);
+uint4 __ovld amd_bfm(uint4 src0, uint4 src1);
+uint8 __ovld amd_bfm(uint8 src0, uint8 src1);
+uint16 __ovld amd_bfm(uint16 src0, uint16 src1);
+
+float __ovld amd_max3(float src0, float src1, float src2);
+float2 __ovld amd_max3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_max3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_max3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_max3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_max3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_max3(int src0, int src1, int src2);
+int2 __ovld amd_max3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_max3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_max3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_max3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_max3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_max3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_max3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_max3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_max3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_max3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_max3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_median3(float src0, float src1, float src2);
+float2 __ovld amd_median3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_median3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_median3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_median3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_median3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_median3(int src0, int src1, int src2);
+int2 __ovld amd_median3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_median3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_median3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_median3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_median3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_median3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_median3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_median3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_median3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_median3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_median3(uint16 src0, uint16 src1, uint16 src2);
+
+float __ovld amd_min3(float src0, float src1, float src2);
+float2 __ovld amd_min3(float2 src0, float2 src1, float2 src2);
+float3 __ovld amd_min3(float3 src0, float3 src1, float3 src2);
+float4 __ovld amd_min3(float4 src0, float4 src1, float4 src2);
+float8 __ovld amd_min3(float8 src0, float8 src1, float8 src2);
+float16 __ovld amd_min3(float16 src0, float16 src1, float16 src2);
+
+int __ovld amd_min3(int src0, int src1, int src2);
+int2 __ovld amd_min3(int2 src0, int2 src1, int2 src2);
+int3 __ovld amd_min3(int3 src0, int3 src1, int3 src2);
+int4 __ovld amd_min3(int4 src0, int4 src1, int4 src2);
+int8 __ovld amd_min3(int8 src0, int8 src1, int8 src2);
+int16 __ovld amd_min3(int16 src0, int16 src1, int16 src2);
+
+uint __ovld amd_min3(uint src0, uint src1, uint src2);
+uint2 __ovld amd_min3(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_min3(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_min3(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_min3(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_min3(uint16 src0, uint16 src1, uint16 src2);
+
+ulong __ovld amd_mqsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_mqsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_mqsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_mqsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_mqsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_mqsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+ulong __ovld amd_qsad(ulong src0, uint src1, ulong src2);
+ulong2 __ovld amd_qsad(ulong2 src0, uint2 src1, ulong2 src2);
+ulong3 __ovld amd_qsad(ulong3 src0, uint3 src1, ulong3 src2);
+ulong4 __ovld amd_qsad(ulong4 src0, uint4 src1, ulong4 src2);
+ulong8 __ovld amd_qsad(ulong8 src0, uint8 src1, ulong8 src2);
+ulong16 __ovld amd_qsad(ulong16 src0, uint16 src1, ulong16 src2);
+
+uint __ovld amd_msad(uint src0, uint src1, uint src2);
+uint2 __ovld amd_msad(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_msad(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_msad(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_msad(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_msad(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadd(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadd(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadd(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadd(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadd(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadd(uint16 src0, uint16 src1, uint16 src2);
+
+uint __ovld amd_sadw(uint src0, uint src1, uint src2);
+uint2 __ovld amd_sadw(uint2 src0, uint2 src1, uint2 src2);
+uint3 __ovld amd_sadw(uint3 src0, uint3 src1, uint3 src2);
+uint4 __ovld amd_sadw(uint4 src0, uint4 src1, uint4 src2);
+uint8 __ovld amd_sadw(uint8 src0, uint8 src1, uint8 src2);
+uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
+#endif // cl_amd_media_ops2
+
// Disable any extensions we may have enabled previously.
#pragma OPENCL EXTENSION all : disable
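Usage sketch (editor's illustration): the cl_amd_media_ops* declarations above are AMD vendor builtins; for instance amd_bfe extracts a bitfield and amd_median3 picks the middle of three values.

#ifdef cl_amd_media_ops2
__kernel void media_demo(__global uint *out, uint src) {
    uint field = amd_bfe(src, 8u, 4u);    /* bits [11:8] of src */
    out[0] = amd_median3(field, 3u, 12u);
}
#endif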
diff --git a/lib/Headers/pmmintrin.h b/lib/Headers/pmmintrin.h
index 5b1058069c44..d4f6487af179 100644
--- a/lib/Headers/pmmintrin.h
+++ b/lib/Headers/pmmintrin.h
@@ -37,7 +37,7 @@
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VLDDQU instruction.
+/// This intrinsic corresponds to the <c> VLDDQU </c> instruction.
///
/// \param __p
/// A pointer to a 128-bit integer vector containing integer values.
@@ -53,7 +53,7 @@ _mm_lddqu_si128(__m128i const *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDSUBPS instruction.
+/// This intrinsic corresponds to the <c> VADDSUBPS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing the left source operand.
@@ -72,7 +72,7 @@ _mm_addsub_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHADDPS instruction.
+/// This intrinsic corresponds to the <c> VHADDPS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -95,7 +95,7 @@ _mm_hadd_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHSUBPS instruction.
+/// This intrinsic corresponds to the <c> VHSUBPS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -115,18 +115,18 @@ _mm_hsub_ps(__m128 __a, __m128 __b)
/// \brief Moves and duplicates high-order (odd-indexed) values from a 128-bit
/// vector of [4 x float] to float values stored in a 128-bit vector of
-/// [4 x float].
-/// Bits [127:96] of the source are written to bits [127:96] and [95:64] of
-/// the destination.
-/// Bits [63:32] of the source are written to bits [63:32] and [31:0] of the
-/// destination.
+/// [4 x float].
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSHDUP instruction.
+/// This intrinsic corresponds to the <c> VMOVSHDUP </c> instruction.
///
/// \param __a
-/// A 128-bit vector of [4 x float].
+/// A 128-bit vector of [4 x float]. \n
+/// Bits [127:96] of the source are written to bits [127:96] and [95:64] of
+/// the destination. \n
+/// Bits [63:32] of the source are written to bits [63:32] and [31:0] of the
+/// destination.
/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
/// values.
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -135,20 +135,19 @@ _mm_movehdup_ps(__m128 __a)
return __builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 1, 1, 3, 3);
}
-/// \brief Duplicates low-order (even-indexed) values from a 128-bit
-/// vector of [4 x float] to float values stored in a 128-bit vector of
-/// [4 x float].
-/// Bits [95:64] of the source are written to bits [127:96] and [95:64] of
-/// the destination.
-/// Bits [31:0] of the source are written to bits [63:32] and [31:0] of the
-/// destination.
+/// \brief Duplicates low-order (even-indexed) values from a 128-bit vector of
+/// [4 x float] to float values stored in a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSLDUP instruction.
+/// This intrinsic corresponds to the <c> VMOVSLDUP </c> instruction.
///
/// \param __a
-/// A 128-bit vector of [4 x float].
+/// A 128-bit vector of [4 x float] \n
+/// Bits [95:64] of the source are written to bits [127:96] and [95:64] of
+/// the destination. \n
+/// Bits [31:0] of the source are written to bits [63:32] and [31:0] of the
+/// destination.
/// \returns A 128-bit vector of [4 x float] containing the moved and duplicated
/// values.
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -162,7 +161,7 @@ _mm_moveldup_ps(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDSUBPD instruction.
+/// This intrinsic corresponds to the <c> VADDSUBPD </c> instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double] containing the left source operand.
@@ -181,7 +180,7 @@ _mm_addsub_pd(__m128d __a, __m128d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHADDPD instruction.
+/// This intrinsic corresponds to the <c> VHADDPD </c> instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double] containing one of the source operands.
@@ -204,7 +203,7 @@ _mm_hadd_pd(__m128d __a, __m128d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VHSUBPD instruction.
+/// This intrinsic corresponds to the <c> VHSUBPD </c> instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double] containing one of the source operands.
@@ -231,7 +230,7 @@ _mm_hsub_pd(__m128d __a, __m128d __b)
/// __m128d _mm_loaddup_pd(double const * dp);
/// \endcode
///
-/// This intrinsic corresponds to the \c VMOVDDUP instruction.
+/// This intrinsic corresponds to the <c> VMOVDDUP </c> instruction.
///
/// \param dp
/// A pointer to a double-precision value to be moved and duplicated.
@@ -245,7 +244,7 @@ _mm_hsub_pd(__m128d __a, __m128d __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVDDUP instruction.
+/// This intrinsic corresponds to the <c> VMOVDDUP </c> instruction.
///
/// \param __a
/// A 128-bit vector of [2 x double]. Bits [63:0] are written to bits
@@ -272,7 +271,7 @@ _mm_movedup_pd(__m128d __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c MONITOR instruction.
+/// This intrinsic corresponds to the <c> MONITOR </c> instruction.
///
/// \param __p
/// The memory range to be monitored. The size of the range is determined by
@@ -293,7 +292,7 @@ _mm_monitor(void const *__p, unsigned __extensions, unsigned __hints)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c MWAIT instruction.
+/// This intrinsic corresponds to the <c> MWAIT </c> instruction.
///
/// \param __extensions
/// Optional extensions for the monitoring state, which may vary by
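Usage sketch (editor's illustration, not part of the patch): a common use of the SSE3 intrinsics documented in this header is a horizontal sum of a [4 x float] vector.

#include <pmmintrin.h>

static inline float hsum_ps(__m128 v) {
  __m128 shuf = _mm_movehdup_ps(v);   /* duplicate odd-indexed lanes */
  __m128 sums = _mm_add_ps(v, shuf);
  shuf = _mm_movehl_ps(shuf, sums);   /* move the high pair down */
  sums = _mm_add_ss(sums, shuf);
  return _mm_cvtss_f32(sums);
}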
diff --git a/lib/Headers/popcntintrin.h b/lib/Headers/popcntintrin.h
index 7e2f1670805f..0b4793e58bcb 100644
--- a/lib/Headers/popcntintrin.h
+++ b/lib/Headers/popcntintrin.h
@@ -31,7 +31,7 @@
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c POPCNT instruction.
+/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
///
/// \param __A
/// An unsigned 32-bit integer operand.
@@ -47,7 +47,7 @@ _mm_popcnt_u32(unsigned int __A)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c POPCNT instruction.
+/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
///
/// \param __A
/// A signed 32-bit integer operand.
@@ -64,7 +64,7 @@ _popcnt32(int __A)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c POPCNT instruction.
+/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
///
/// \param __A
/// An unsigned 64-bit integer operand.
@@ -80,7 +80,7 @@ _mm_popcnt_u64(unsigned long long __A)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c POPCNT instruction.
+/// This intrinsic corresponds to the <c> POPCNT </c> instruction.
///
/// \param __A
/// A signed 64-bit integer operand.
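Usage sketch (editor's illustration): the POPCNT intrinsics above; compile with -mpopcnt, and note the 64-bit form requires an x86-64 target.

#include <popcntintrin.h>

unsigned bits_set(unsigned int lo, unsigned long long hi) {
  return (unsigned)(_mm_popcnt_u32(lo) + _mm_popcnt_u64(hi));
}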
diff --git a/lib/Headers/stdatomic.h b/lib/Headers/stdatomic.h
index e03798766014..23bb3a357768 100644
--- a/lib/Headers/stdatomic.h
+++ b/lib/Headers/stdatomic.h
@@ -45,11 +45,11 @@ extern "C" {
#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
-#define ATOMIC_SHORT_T_LOCK_FREE __GCC_ATOMIC_SHORT_T_LOCK_FREE
-#define ATOMIC_INT_T_LOCK_FREE __GCC_ATOMIC_INT_T_LOCK_FREE
-#define ATOMIC_LONG_T_LOCK_FREE __GCC_ATOMIC_LONG_T_LOCK_FREE
-#define ATOMIC_LLONG_T_LOCK_FREE __GCC_ATOMIC_LLONG_T_LOCK_FREE
-#define ATOMIC_POINTER_T_LOCK_FREE __GCC_ATOMIC_POINTER_T_LOCK_FREE
+#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
+#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
+#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
+#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
+#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
/* 7.17.2 Initialization */
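Usage sketch (editor's illustration): the corrected macro names match C11 7.17.1, so standard feature tests now work against this header.

#include <stdatomic.h>

#if ATOMIC_INT_LOCK_FREE == 2
/* atomic_int is always lock-free on this target */
#endif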
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index a72796ba4a68..80664043a06f 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -483,15 +483,15 @@ _mm_hsubs_pi16(__m64 __a, __m64 __b)
/// \param __b
/// A 128-bit integer vector containing the second source operand.
/// \returns A 128-bit integer vector containing the sums of products of both
-/// operands:
-/// R0 := (__a0 * __b0) + (__a1 * __b1)
-/// R1 := (__a2 * __b2) + (__a3 * __b3)
-/// R2 := (__a4 * __b4) + (__a5 * __b5)
-/// R3 := (__a6 * __b6) + (__a7 * __b7)
-/// R4 := (__a8 * __b8) + (__a9 * __b9)
-/// R5 := (__a10 * __b10) + (__a11 * __b11)
-/// R6 := (__a12 * __b12) + (__a13 * __b13)
-/// R7 := (__a14 * __b14) + (__a15 * __b15)
+/// operands: \n
+/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n
+/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n
+/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n
+/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7) \n
+/// \a R4 := (\a __a8 * \a __b8) + (\a __a9 * \a __b9) \n
+/// \a R5 := (\a __a10 * \a __b10) + (\a __a11 * \a __b11) \n
+/// \a R6 := (\a __a12 * \a __b12) + (\a __a13 * \a __b13) \n
+/// \a R7 := (\a __a14 * \a __b14) + (\a __a15 * \a __b15)
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_maddubs_epi16(__m128i __a, __m128i __b)
{
@@ -516,11 +516,11 @@ _mm_maddubs_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer vector containing the second source operand.
/// \returns A 64-bit integer vector containing the sums of products of both
-/// operands:
-/// R0 := (__a0 * __b0) + (__a1 * __b1)
-/// R1 := (__a2 * __b2) + (__a3 * __b3)
-/// R2 := (__a4 * __b4) + (__a5 * __b5)
-/// R3 := (__a6 * __b6) + (__a7 * __b7)
+/// operands: \n
+/// \a R0 := (\a __a0 * \a __b0) + (\a __a1 * \a __b1) \n
+/// \a R1 := (\a __a2 * \a __b2) + (\a __a3 * \a __b3) \n
+/// \a R2 := (\a __a4 * \a __b4) + (\a __a5 * \a __b5) \n
+/// \a R3 := (\a __a6 * \a __b6) + (\a __a7 * \a __b7)
static __inline__ __m64 __DEFAULT_FN_ATTRS
_mm_maddubs_pi16(__m64 __a, __m64 __b)
{
@@ -580,11 +580,11 @@ _mm_mulhrs_pi16(__m64 __a, __m64 __b)
/// \param __b
/// A 128-bit integer vector containing control bytes corresponding to
/// positions in the destination:
-/// Bit 7:
-/// 1: Clear the corresponding byte in the destination.
+/// Bit 7: \n
+/// 1: Clear the corresponding byte in the destination. \n
/// 0: Copy the selected source byte to the corresponding byte in the
-/// destination.
-/// Bits [6:4] Reserved.
+/// destination. \n
+/// Bits [6:4] Reserved. \n
/// Bits [3:0] select the source byte to be copied.
/// \returns A 128-bit integer vector containing the copied or cleared values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
@@ -606,10 +606,10 @@ _mm_shuffle_epi8(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer vector containing control bytes corresponding to
/// positions in the destination:
-/// Bit 7:
-/// 1: Clear the corresponding byte in the destination.
+/// Bit 7: \n
+/// 1: Clear the corresponding byte in the destination. \n
/// 0: Copy the selected source byte to the corresponding byte in the
-/// destination.
+/// destination. \n
/// Bits [3:0] select the source byte to be copied.
/// \returns A 64-bit integer vector containing the copied or cleared values.
static __inline__ __m64 __DEFAULT_FN_ATTRS
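Usage sketch (editor's illustration, not part of the patch): _mm_shuffle_epi8 with the control-byte semantics described above, here reversing the bytes of a vector.

#include <tmmintrin.h>

__m128i reverse_bytes(__m128i v) {
  const __m128i ctl = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                                    7, 6, 5, 4, 3, 2, 1, 0);
  return _mm_shuffle_epi8(v, ctl);    /* bit 7 clear: plain byte select */
}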
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index 99cddb0fac82..dc31b85cfd7c 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -46,7 +46,7 @@ typedef unsigned int __v4su __attribute__((__vector_size__(16)));
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDSS / ADDSS instructions.
+/// This intrinsic corresponds to the <c> VADDSS / ADDSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -69,7 +69,7 @@ _mm_add_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VADDPS / ADDPS instructions.
+/// This intrinsic corresponds to the <c> VADDPS / ADDPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -88,7 +88,7 @@ _mm_add_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSUBSS / SUBSS instructions.
+/// This intrinsic corresponds to the <c> VSUBSS / SUBSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing the minuend. The lower 32 bits
@@ -112,7 +112,7 @@ _mm_sub_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSUBPS / SUBPS instructions.
+/// This intrinsic corresponds to the <c> VSUBPS / SUBPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing the minuend.
@@ -131,7 +131,7 @@ _mm_sub_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMULSS / MULSS instructions.
+/// This intrinsic corresponds to the <c> VMULSS / MULSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -154,7 +154,7 @@ _mm_mul_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMULPS / MULPS instructions.
+/// This intrinsic corresponds to the <c> VMULPS / MULPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -173,7 +173,7 @@ _mm_mul_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VDIVSS / DIVSS instructions.
+/// This intrinsic corresponds to the <c> VDIVSS / DIVSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing the dividend. The lower 32
@@ -195,7 +195,7 @@ _mm_div_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VDIVPS / DIVPS instructions.
+/// This intrinsic corresponds to the <c> VDIVPS / DIVPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing the dividend.
@@ -214,7 +214,7 @@ _mm_div_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSQRTSS / SQRTSS instructions.
+/// This intrinsic corresponds to the <c> VSQRTSS / SQRTSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -233,7 +233,7 @@ _mm_sqrt_ss(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSQRTPS / SQRTPS instructions.
+/// This intrinsic corresponds to the <c> VSQRTPS / SQRTPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -250,7 +250,7 @@ _mm_sqrt_ps(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VRCPSS / RCPSS instructions.
+/// This intrinsic corresponds to the <c> VRCPSS / RCPSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -269,7 +269,7 @@ _mm_rcp_ss(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VRCPPS / RCPPS instructions.
+/// This intrinsic corresponds to the <c> VRCPPS / RCPPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -286,7 +286,7 @@ _mm_rcp_ps(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VRSQRTSS / RSQRTSS instructions.
+/// This intrinsic corresponds to the <c> VRSQRTSS / RSQRTSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -306,7 +306,7 @@ _mm_rsqrt_ss(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VRSQRTPS / RSQRTPS instructions.
+/// This intrinsic corresponds to the <c> VRSQRTPS / RSQRTPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -324,7 +324,7 @@ _mm_rsqrt_ps(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMINSS / MINSS instructions.
+/// This intrinsic corresponds to the <c> VMINSS / MINSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -341,12 +341,12 @@ _mm_min_ss(__m128 __a, __m128 __b)
return __builtin_ia32_minss((__v4sf)__a, (__v4sf)__b);
}
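Usage sketch (editor's illustration, not part of the patch): the MINPS/MAXPS family documented here composes into a vector clamp.

#include <xmmintrin.h>

static inline __m128 clamp_ps(__m128 v, __m128 lo, __m128 hi) {
  return _mm_min_ps(_mm_max_ps(v, lo), hi);
}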
-/// \brief Compares two 128-bit vectors of [4 x float] and returns the
-/// lesser of each pair of values.
+/// \brief Compares two 128-bit vectors of [4 x float] and returns the lesser
+/// of each pair of values.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMINPS / MINPS instructions.
+/// This intrinsic corresponds to the <c> VMINPS / MINPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands.
@@ -361,12 +361,12 @@ _mm_min_ps(__m128 __a, __m128 __b)
}
/// \brief Compares two 32-bit float values in the low-order bits of both
-/// operands and returns the greater value in the low-order bits of
-/// a vector [4 x float].
+/// operands and returns the greater value in the low-order bits of a 128-bit
+/// vector of [4 x float].
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMAXSS / MAXSS instructions.
+/// This intrinsic corresponds to the <c> VMAXSS / MAXSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -388,7 +388,7 @@ _mm_max_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMAXPS / MAXPS instructions.
+/// This intrinsic corresponds to the <c> VMAXPS / MAXPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands.
@@ -406,7 +406,7 @@ _mm_max_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VANDPS / ANDPS instructions.
+/// This intrinsic corresponds to the <c> VANDPS / ANDPS </c> instructions.
///
/// \param __a
/// A 128-bit vector containing one of the source operands.
@@ -426,7 +426,7 @@ _mm_and_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VANDNPS / ANDNPS instructions.
+/// This intrinsic corresponds to the <c> VANDNPS / ANDNPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing the first source operand. The
@@ -446,7 +446,7 @@ _mm_andnot_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VORPS / ORPS instructions.
+/// This intrinsic corresponds to the <c> VORPS / ORPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -465,7 +465,7 @@ _mm_or_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VXORPS / XORPS instructions.
+/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the source operands.
@@ -485,7 +485,7 @@ _mm_xor_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPEQSS / CMPEQSS instructions.
+/// This intrinsic corresponds to the <c> VCMPEQSS / CMPEQSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -506,7 +506,7 @@ _mm_cmpeq_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPEQPS / CMPEQPS instructions.
+/// This intrinsic corresponds to the <c> VCMPEQPS / CMPEQPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -526,7 +526,7 @@ _mm_cmpeq_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLTSS / CMPLTSS instructions.
+/// This intrinsic corresponds to the <c> VCMPLTSS / CMPLTSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -548,7 +548,7 @@ _mm_cmplt_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLTPS / CMPLTPS instructions.
+/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -569,7 +569,7 @@ _mm_cmplt_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLESS / CMPLESS instructions.
+/// This intrinsic corresponds to the <c> VCMPLESS / CMPLESS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -591,7 +591,7 @@ _mm_cmple_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLEPS / CMPLEPS instructions.
+/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -611,7 +611,7 @@ _mm_cmple_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLTSS / CMPLTSS instructions.
+/// This intrinsic corresponds to the <c> VCMPLTSS / CMPLTSS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -635,7 +635,7 @@ _mm_cmpgt_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLTPS / CMPLTPS instructions.
+/// This intrinsic corresponds to the <c> VCMPLTPS / CMPLTPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -656,7 +656,7 @@ _mm_cmpgt_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLESS / CMPLESS instructions.
+/// This intrinsic corresponds to the <c> VCMPLESS / CMPLESS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -680,7 +680,7 @@ _mm_cmpge_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPLEPS / CMPLEPS instructions.
+/// This intrinsic corresponds to the <c> VCMPLEPS / CMPLEPS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -699,7 +699,8 @@ _mm_cmpge_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNEQSS / CMPNEQSS instructions.
+/// This intrinsic corresponds to the <c> VCMPNEQSS / CMPNEQSS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -720,7 +721,8 @@ _mm_cmpneq_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNEQPS / CMPNEQPS instructions.
+/// This intrinsic corresponds to the <c> VCMPNEQPS / CMPNEQPS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -740,7 +742,8 @@ _mm_cmpneq_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLTSS / CMPNLTSS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLTSS / CMPNLTSS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -762,7 +765,8 @@ _mm_cmpnlt_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLTPS / CMPNLTPS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -783,7 +787,8 @@ _mm_cmpnlt_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLESS / CMPNLESS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLESS / CMPNLESS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -805,7 +810,8 @@ _mm_cmpnle_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLEPS / CMPNLEPS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -826,7 +832,8 @@ _mm_cmpnle_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLTSS / CMPNLTSS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLTSS / CMPNLTSS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -850,7 +857,8 @@ _mm_cmpngt_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLTPS / CMPNLTPS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLTPS / CMPNLTPS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -871,7 +879,8 @@ _mm_cmpngt_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLESS / CMPNLESS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLESS / CMPNLESS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -895,7 +904,8 @@ _mm_cmpnge_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPNLEPS / CMPNLEPS instructions.
+/// This intrinsic corresponds to the <c> VCMPNLEPS / CMPNLEPS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -916,7 +926,8 @@ _mm_cmpnge_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPORDSS / CMPORDSS instructions.
+/// This intrinsic corresponds to the <c> VCMPORDSS / CMPORDSS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -938,7 +949,8 @@ _mm_cmpord_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPORDPS / CMPORDPS instructions.
+/// This intrinsic corresponds to the <c> VCMPORDPS / CMPORDPS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -959,7 +971,8 @@ _mm_cmpord_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPUNORDSS / CMPUNORDSS instructions.
+/// This intrinsic corresponds to the <c> VCMPUNORDSS / CMPUNORDSS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float] containing one of the operands. The lower
@@ -981,7 +994,8 @@ _mm_cmpunord_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCMPUNORDPS / CMPUNORDPS instructions.
+/// This intrinsic corresponds to the <c> VCMPUNORDPS / CMPUNORDPS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -999,7 +1013,8 @@ _mm_cmpunord_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1020,7 +1035,8 @@ _mm_comieq_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1041,7 +1057,7 @@ _mm_comilt_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1062,7 +1078,7 @@ _mm_comile_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1083,7 +1099,7 @@ _mm_comigt_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1104,7 +1120,7 @@ _mm_comige_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCOMISS / COMISS instructions.
+/// This intrinsic corresponds to the <c> VCOMISS / COMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1125,7 +1141,7 @@ _mm_comineq_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1146,7 +1162,7 @@ _mm_ucomieq_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1162,13 +1178,13 @@ _mm_ucomilt_ss(__m128 __a, __m128 __b)
}
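
As a concrete illustration of the ordered/unordered scalar compares documented
in this section, here is a minimal hedged sketch (the example values and the
use of main() are ours, not part of the header). COMISS and UCOMISS return the
same results for ordinary operands; they differ only in that COMISS raises the
invalid-operation exception even for quiet NaNs, while UCOMISS raises it only
for signaling NaNs.

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  __m128 a = _mm_set_ss(1.0f);
  __m128 b = _mm_set_ss(2.0f);
  printf("%d\n", _mm_comilt_ss(a, b));  /* 1: 1.0f < 2.0f */
  printf("%d\n", _mm_ucomige_ss(a, b)); /* 0: 1.0f >= 2.0f is false */
  return 0;
}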
/// \brief Performs an unordered comparison of two 32-bit float values using
-/// the low-order bits of both operands to determine if the first operand
-/// is less than or equal to the second operand and returns the result of
-/// the comparison.
+/// the low-order bits of both operands to determine if the first operand is
+/// less than or equal to the second operand and returns the result of the
+/// comparison.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1184,13 +1200,13 @@ _mm_ucomile_ss(__m128 __a, __m128 __b)
}
/// \brief Performs an unordered comparison of two 32-bit float values using
-/// the low-order bits of both operands to determine if the first operand
-/// is greater than the second operand and returns the result of the
+/// the low-order bits of both operands to determine if the first operand is
+/// greater than the second operand and returns the result of the
/// comparison.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1212,7 +1228,7 @@ _mm_ucomigt_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1233,7 +1249,7 @@ _mm_ucomige_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUCOMISS / UCOMISS instructions.
+/// This intrinsic corresponds to the <c> VUCOMISS / UCOMISS </c> instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1253,7 +1269,8 @@ _mm_ucomineq_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1270,7 +1287,8 @@ _mm_cvtss_si32(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1289,7 +1307,8 @@ _mm_cvt_ss2si(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSS2SI / CVTSS2SI instructions.
+/// This intrinsic corresponds to the <c> VCVTSS2SI / CVTSS2SI </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1308,7 +1327,7 @@ _mm_cvtss_si64(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPS2PI instruction.
+/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1324,7 +1343,7 @@ _mm_cvtps_pi32(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPS2PI instruction.
+/// This intrinsic corresponds to the <c> CVTPS2PI </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1341,7 +1360,8 @@ _mm_cvt_ps2pi(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1359,7 +1379,8 @@ _mm_cvttss_si32(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1371,13 +1392,15 @@ _mm_cvtt_ss2si(__m128 __a)
return _mm_cvttss_si32(__a);
}
+#ifdef __x86_64__
/// \brief Converts a float value contained in the lower 32 bits of a vector of
/// [4 x float] into a 64-bit integer, truncating the result when it is
/// inexact.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTTSS2SI / CVTTSS2SI instructions.
+/// This intrinsic corresponds to the <c> VCVTTSS2SI / CVTTSS2SI </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1388,6 +1411,7 @@ _mm_cvttss_si64(__m128 __a)
{
return __builtin_ia32_cvttss2si64((__v4sf)__a);
}
+#endif
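
A short hedged sketch of the two conversion flavors above (the example value
is ours): _mm_cvtss_si32 rounds according to the current MXCSR rounding mode,
round-to-nearest-even by default, while _mm_cvttss_si32 always truncates
toward zero.

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  __m128 v = _mm_set_ss(2.75f);
  printf("%d\n", _mm_cvtss_si32(v));  /* 3: rounded to nearest */
  printf("%d\n", _mm_cvttss_si32(v)); /* 2: truncated toward zero */
  return 0;
}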
/// \brief Converts two low-order float values in a 128-bit vector of
/// [4 x float] into a 64-bit vector of [2 x i32], truncating the result
@@ -1395,7 +1419,8 @@ _mm_cvttss_si64(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTTPS2PI / VTTPS2PI instructions.
+/// This intrinsic corresponds to the <c> VCVTTPS2PI / CVTTPS2PI </c>
+/// instructions.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1412,7 +1437,7 @@ _mm_cvttps_pi32(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTTPS2PI instruction.
+/// This intrinsic corresponds to the <c> CVTTPS2PI </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1430,7 +1455,7 @@ _mm_cvtt_ps2pi(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1453,7 +1478,7 @@ _mm_cvtsi32_ss(__m128 __a, int __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1477,7 +1502,7 @@ _mm_cvt_si2ss(__m128 __a, int __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VCVTSI2SS / CVTSI2SS instruction.
+/// This intrinsic corresponds to the <c> VCVTSI2SS / CVTSI2SS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1502,7 +1527,7 @@ _mm_cvtsi64_ss(__m128 __a, long long __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPI2PS instruction.
+/// This intrinsic corresponds to the <c> CVTPI2PS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1525,7 +1550,7 @@ _mm_cvtpi32_ps(__m128 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPI2PS instruction.
+/// This intrinsic corresponds to the <c> CVTPI2PS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float].
@@ -1546,7 +1571,7 @@ _mm_cvt_pi2ps(__m128 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float]. The lower 32 bits of this operand are
@@ -1558,13 +1583,13 @@ _mm_cvtss_f32(__m128 __a)
return __a[0];
}
-/// \brief Loads two packed float values from the address __p into the
+/// \brief Loads two packed float values from the address \a __p into the
/// high-order bits of a 128-bit vector of [4 x float]. The low-order bits
/// are copied from the low-order bits of the first operand.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVHPD / MOVHPD instruction.
+/// This intrinsic corresponds to the <c> VMOVHPD / MOVHPD </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float]. Bits [63:0] are written to bits [63:0]
@@ -1585,13 +1610,13 @@ _mm_loadh_pi(__m128 __a, const __m64 *__p)
return __builtin_shufflevector(__a, __bb, 0, 1, 4, 5);
}
-/// \brief Loads two packed float values from the address __p into the low-order
-/// bits of a 128-bit vector of [4 x float]. The high-order bits are copied
-/// from the high-order bits of the first operand.
+/// \brief Loads two packed float values from the address \a __p into the
+/// low-order bits of a 128-bit vector of [4 x float]. The high-order bits
+/// are copied from the high-order bits of the first operand.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVLPD / MOVLPD instruction.
+/// This intrinsic corresponds to the <c> VMOVLPD / MOVLPD </c> instruction.
///
/// \param __a
/// A 128-bit vector of [4 x float]. Bits [127:64] are written to bits
@@ -1619,7 +1644,7 @@ _mm_loadl_pi(__m128 __a, const __m64 *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
///
/// \param __p
/// A pointer to a 32-bit memory location containing a single-precision
@@ -1642,13 +1667,13 @@ _mm_load_ss(const float *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSS / MOVSS + \c shuffling
+/// This intrinsic corresponds to the <c> VMOVSS / MOVSS + shuffling </c>
/// instruction.
///
/// \param __p
/// A pointer to a float value to be loaded and duplicated.
-/// \returns A 128-bit vector of [4 x float] containing the loaded
-/// and duplicated values.
+/// \returns A 128-bit vector of [4 x float] containing the loaded and
+/// duplicated values.
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_load1_ps(const float *__p)
{
@@ -1666,7 +1691,7 @@ _mm_load1_ps(const float *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction.
+/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
///
/// \param __p
/// A pointer to a 128-bit memory location. The address of the memory
@@ -1683,7 +1708,7 @@ _mm_load_ps(const float *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction.
+/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
///
/// \param __p
/// A pointer to a 128-bit memory location. The address of the memory
@@ -1703,7 +1728,7 @@ _mm_loadu_ps(const float *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS + \c shuffling
+/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
/// instruction.
///
/// \param __p
@@ -1725,7 +1750,6 @@ _mm_loadr_ps(const float *__p)
/// This intrinsic has no corresponding instruction.
///
/// \returns A 128-bit vector of [4 x float] containing undefined values.
-
static __inline__ __m128 __DEFAULT_FN_ATTRS
_mm_undefined_ps(void)
{
@@ -1738,7 +1762,7 @@ _mm_undefined_ps(void)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
///
/// \param __w
/// A single-precision floating-point value used to initialize the lower 32
@@ -1758,7 +1782,7 @@ _mm_set_ss(float __w)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+/// This intrinsic corresponds to the <c> VPERMILPS / PERMILPS </c> instruction.
///
/// \param __w
/// A single-precision floating-point value used to initialize each vector
@@ -1777,7 +1801,7 @@ _mm_set1_ps(float __w)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPERMILPS / PERMILPS instruction.
+/// This intrinsic corresponds to the <c> VPERMILPS / PERMILPS </c> instruction.
///
/// \param __w
/// A single-precision floating-point value used to initialize each vector
@@ -1849,7 +1873,7 @@ _mm_setr_ps(float __z, float __y, float __x, float __w)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VXORPS / XORPS instruction.
+/// This intrinsic corresponds to the <c> VXORPS / XORPS </c> instruction.
///
/// \returns An initialized 128-bit floating-point vector of [4 x float] with
/// all elements set to zero.
@@ -1864,7 +1888,7 @@ _mm_setzero_ps(void)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPEXTRQ / MOVQ instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / MOVQ </c> instruction.
///
/// \param __p
/// A pointer to a 64-bit memory location.
@@ -1881,7 +1905,7 @@ _mm_storeh_pi(__m64 *__p, __m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVLPS / MOVLPS instruction.
+/// This intrinsic corresponds to the <c> VMOVLPS / MOVLPS </c> instruction.
///
/// \param __p
/// A pointer to a memory location that will receive the float values.
@@ -1898,7 +1922,7 @@ _mm_storel_pi(__m64 *__p, __m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
///
/// \param __p
/// A pointer to a 32-bit memory location.
@@ -1913,12 +1937,12 @@ _mm_store_ss(float *__p, __m128 __a)
((struct __mm_store_ss_struct*)__p)->__u = __a[0];
}
-/// \brief Stores float values from a 128-bit vector of [4 x float] to an
-/// unaligned memory location.
+/// \brief Stores a 128-bit vector of [4 x float] to an unaligned memory
+/// location.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVUPS / MOVUPS instruction.
+/// This intrinsic corresponds to the <c> VMOVUPS / MOVUPS </c> instruction.
///
/// \param __p
/// A pointer to a 128-bit memory location. The address of the memory
@@ -1934,19 +1958,18 @@ _mm_storeu_ps(float *__p, __m128 __a)
((struct __storeu_ps*)__p)->__v = __a;
}
-/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into
-/// four contiguous elements in an aligned memory location.
+/// \brief Stores a 128-bit vector of [4 x float] into an aligned memory
+/// location.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to \c VMOVAPS / MOVAPS + \c shuffling
-/// instruction.
+/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS </c> instruction.
///
/// \param __p
-/// A pointer to a 128-bit memory location.
+/// A pointer to a 128-bit memory location. The address of the memory
+/// location has to be 16-byte aligned.
/// \param __a
-/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
-/// of the four contiguous elements pointed by __p.
+/// A 128-bit vector of [4 x float] containing the values to be stored.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store_ps(float *__p, __m128 __a)
{
@@ -1958,14 +1981,14 @@ _mm_store_ps(float *__p, __m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to \c VMOVAPS / MOVAPS + \c shuffling
+/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
/// instruction.
///
/// \param __p
/// A pointer to a 128-bit memory location.
/// \param __a
/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
-/// of the four contiguous elements pointed by __p.
+///    of the four contiguous elements pointed to by \a __p.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store1_ps(float *__p, __m128 __a)
{
@@ -1973,18 +1996,19 @@ _mm_store1_ps(float *__p, __m128 __a)
_mm_store_ps(__p, __a);
}
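
A hedged sketch of the aligned/unaligned store contract that the corrected
documentation above describes (the function name is ours): _mm_store_ps
requires a 16-byte-aligned destination, while _mm_storeu_ps accepts any
address.

#include <xmmintrin.h>

void write_vec(float *any_dst) {
  __attribute__((aligned(16))) float aligned_dst[4];
  __m128 v = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
  _mm_store_ps(aligned_dst, v); /* destination must be 16-byte aligned */
  _mm_storeu_ps(any_dst, v);    /* destination may be unaligned */
}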
-/// \brief Stores float values from a 128-bit vector of [4 x float] to an
-/// aligned memory location.
+/// \brief Stores the lower 32 bits of a 128-bit vector of [4 x float] into
+/// four contiguous elements in an aligned memory location.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS instruction.
+/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
+/// instruction.
///
/// \param __p
-/// A pointer to a 128-bit memory location. The address of the memory
-/// location has to be 128-bit aligned.
+/// A pointer to a 128-bit memory location.
/// \param __a
-/// A 128-bit vector of [4 x float] containing the values to be stored.
+/// A 128-bit vector of [4 x float] whose lower 32 bits are stored to each
+///    of the four contiguous elements pointed to by \a __p.
static __inline__ void __DEFAULT_FN_ATTRS
_mm_store_ps1(float *__p, __m128 __a)
{
@@ -1996,7 +2020,7 @@ _mm_store_ps1(float *__p, __m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVAPS / MOVAPS + \c shuffling
+/// This intrinsic corresponds to the <c> VMOVAPS / MOVAPS + shuffling </c>
/// instruction.
///
/// \param __p
@@ -2029,20 +2053,21 @@ _mm_storer_ps(float *__p, __m128 __a)
/// void _mm_prefetch(const void * a, const int sel);
/// \endcode
///
-/// This intrinsic corresponds to the \c PREFETCHNTA instruction.
+/// This intrinsic corresponds to the <c> PREFETCHNTA </c> instruction.
///
/// \param a
/// A pointer to a memory location containing a cache line of data.
/// \param sel
-/// A predefined integer constant specifying the type of prefetch operation:
-/// _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint.
-/// The PREFETCHNTA instruction will be generated.
+/// A predefined integer constant specifying the type of prefetch
+/// operation: \n
+/// _MM_HINT_NTA: Move data using the non-temporal access (NTA) hint. The
+/// PREFETCHNTA instruction will be generated. \n
/// _MM_HINT_T0: Move data using the T0 hint. The PREFETCHT0 instruction will
-/// be generated.
+/// be generated. \n
/// _MM_HINT_T1: Move data using the T1 hint. The PREFETCHT1 instruction will
-/// be generated.
+/// be generated. \n
/// _MM_HINT_T2: Move data using the T2 hint. The PREFETCHT2 instruction will
-/// be generated.
+/// be generated.
#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
#endif
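
A hedged usage sketch for the prefetch hints listed above (the loop, the
one-line-ahead stride, and the function name are ours): warm the next 64-byte
cache line with the non-temporal hint while streaming through an array.
PREFETCHNTA never faults, so running past the end of the array is harmless.

#include <xmmintrin.h>

float sum(const float *p, int n) {
  float s = 0.0f;
  for (int i = 0; i < n; ++i) {
    if ((i & 15) == 0) /* one 64-byte line holds 16 floats */
      _mm_prefetch((const char *)(p + i + 16), _MM_HINT_NTA);
    s += p[i];
  }
  return s;
}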
@@ -2052,7 +2077,7 @@ _mm_storer_ps(float *__p, __m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c MOVNTQ instruction.
+/// This intrinsic corresponds to the <c> MOVNTQ </c> instruction.
///
/// \param __p
/// A pointer to an aligned memory location used to store the register value.
@@ -2070,7 +2095,7 @@ _mm_stream_pi(__m64 *__p, __m64 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVNTPS / MOVNTPS instruction.
+/// This intrinsic corresponds to the <c> VMOVNTPS / MOVNTPS </c> instruction.
///
/// \param __p
/// A pointer to a 128-bit aligned memory location that will receive the
@@ -2083,6 +2108,10 @@ _mm_stream_ps(float *__p, __m128 __a)
__builtin_nontemporal_store((__v4sf)__a, (__v4sf*)__p);
}
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/// \brief Forces strong memory ordering (serialization) between store
/// instructions preceding this instruction and store instructions following
/// this instruction, ensuring the system completes all previous stores
@@ -2090,28 +2119,32 @@ _mm_stream_ps(float *__p, __m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c SFENCE instruction.
+/// This intrinsic corresponds to the <c> SFENCE </c> instruction.
///
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_sfence(void)
-{
- __builtin_ia32_sfence();
-}
+void _mm_sfence(void);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
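
A hedged sketch of why _mm_sfence pairs with the non-temporal stores above
(the function name is ours): streaming stores are weakly ordered, so an
SFENCE is required before later stores may be assumed to follow them.

#include <xmmintrin.h>

void fill(float *dst, __m128 v, int nvec) { /* dst must be 16-byte aligned */
  for (int i = 0; i < nvec; ++i)
    _mm_stream_ps(dst + 4 * i, v);
  _mm_sfence(); /* order the streaming stores before subsequent stores */
}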
/// \brief Extracts a 16-bit element from a 64-bit vector of [4 x i16] and
/// returns it, as specified by the immediate integer operand.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPEXTRW / PEXTRW instruction.
+/// \code
+/// int _mm_extract_pi16(__m64 a, int n);
+/// \endcode
///
-/// \param __a
+/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
+///
+/// \param a
/// A 64-bit vector of [4 x i16].
-/// \param __n
-/// An immediate integer operand that determines which bits are extracted:
-/// 0: Bits [15:0] are copied to the destination.
-/// 1: Bits [31:16] are copied to the destination.
-/// 2: Bits [47:32] are copied to the destination.
+/// \param n
+/// An immediate integer operand that determines which bits are extracted: \n
+/// 0: Bits [15:0] are copied to the destination. \n
+/// 1: Bits [31:16] are copied to the destination. \n
+/// 2: Bits [47:32] are copied to the destination. \n
/// 3: Bits [63:48] are copied to the destination.
/// \returns A 16-bit integer containing the extracted 16 bits of packed data.
#define _mm_extract_pi16(a, n) __extension__ ({ \
@@ -2119,26 +2152,30 @@ _mm_sfence(void)
/// \brief Copies data from the 64-bit vector of [4 x i16] to the destination,
/// and inserts the lower 16 bits of an integer operand at the 16-bit offset
-/// specified by the immediate operand __n.
+/// specified by the immediate operand \a n.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VPINSRW / PINSRW instruction.
+/// \code
+/// __m64 _mm_insert_pi16(__m64 a, int d, int n);
+/// \endcode
///
-/// \param __a
+/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
+///
+/// \param a
/// A 64-bit vector of [4 x i16].
-/// \param __d
+/// \param d
/// An integer. The lower 16-bit value from this operand is written to the
-/// destination at the offset specified by operand __n.
-/// \param __n
+/// destination at the offset specified by operand \a n.
+/// \param n
///    An immediate integer operand that determines the bits to be used
-/// in the destination.
-/// 0: Bits [15:0] are copied to the destination.
-/// 1: Bits [31:16] are copied to the destination.
-/// 2: Bits [47:32] are copied to the destination.
-/// 3: Bits [63:48] are copied to the destination.
+/// in the destination. \n
+/// 0: Bits [15:0] are copied to the destination. \n
+/// 1: Bits [31:16] are copied to the destination. \n
+/// 2: Bits [47:32] are copied to the destination. \n
+/// 3: Bits [63:48] are copied to the destination. \n
/// The remaining bits in the destination are copied from the corresponding
-/// bits in operand __a.
+/// bits in operand \a a.
/// \returns A 64-bit integer vector containing the copied packed data from the
/// operands.
#define _mm_insert_pi16(a, d, n) __extension__ ({ \
@@ -2150,7 +2187,7 @@ _mm_sfence(void)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMAXSW instruction.
+/// This intrinsic corresponds to the <c> PMAXSW </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2169,7 +2206,7 @@ _mm_max_pi16(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMAXUB instruction.
+/// This intrinsic corresponds to the <c> PMAXUB </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2188,7 +2225,7 @@ _mm_max_pu8(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMINSW instruction.
+/// This intrinsic corresponds to the <c> PMINSW </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2207,7 +2244,7 @@ _mm_min_pi16(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMINUB instruction.
+/// This intrinsic corresponds to the <c> PMINUB </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2226,7 +2263,7 @@ _mm_min_pu8(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMOVMSKB instruction.
+/// This intrinsic corresponds to the <c> PMOVMSKB </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing the values with bits to be extracted.
@@ -2244,7 +2281,7 @@ _mm_movemask_pi8(__m64 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PMULHUW instruction.
+/// This intrinsic corresponds to the <c> PMULHUW </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2262,27 +2299,31 @@ _mm_mulhi_pu16(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSHUFW instruction.
-///
/// \code
/// __m64 _mm_shuffle_pi16(__m64 a, const int n);
/// \endcode
///
+/// This intrinsic corresponds to the <c> PSHUFW </c> instruction.
+///
/// \param a
/// A 64-bit integer vector containing the values to be shuffled.
/// \param n
/// An immediate value containing an 8-bit value specifying which elements to
-/// copy from a. The destinations within the 64-bit destination are assigned
-/// values as follows:
-/// Bits [1:0] are used to assign values to bits [15:0] in the destination.
-/// Bits [3:2] are used to assign values to bits [31:16] in the destination.
-/// Bits [5:4] are used to assign values to bits [47:32] in the destination.
-/// Bits [7:6] are used to assign values to bits [63:48] in the destination.
-/// Bit value assignments:
-/// 00: assigned from bits [15:0] of a.
-/// 01: assigned from bits [31:16] of a.
-/// 10: assigned from bits [47:32] of a.
-/// 11: assigned from bits [63:48] of a.
+/// copy from \a a. The destinations within the 64-bit destination are
+/// assigned values as follows: \n
+/// Bits [1:0] are used to assign values to bits [15:0] in the
+/// destination. \n
+/// Bits [3:2] are used to assign values to bits [31:16] in the
+/// destination. \n
+/// Bits [5:4] are used to assign values to bits [47:32] in the
+/// destination. \n
+/// Bits [7:6] are used to assign values to bits [63:48] in the
+/// destination. \n
+/// Bit value assignments: \n
+/// 00: assigned from bits [15:0] of \a a. \n
+/// 01: assigned from bits [31:16] of \a a. \n
+/// 10: assigned from bits [47:32] of \a a. \n
+/// 11: assigned from bits [63:48] of \a a.
/// \returns A 64-bit integer vector containing the shuffled values.
#define _mm_shuffle_pi16(a, n) __extension__ ({ \
(__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)); })
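
To make the 2-bit-per-lane immediate described above concrete, a hedged
sketch (the wrapper name is ours): 0x1B is 00 01 10 11 in binary, so
destination lanes 0 through 3 take source lanes 3, 2, 1, 0, which reverses
the four 16-bit elements.

#include <xmmintrin.h>

__m64 reverse_pi16(__m64 a) {
  return _mm_shuffle_pi16(a, 0x1B); /* [a0,a1,a2,a3] -> [a3,a2,a1,a0] */
}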
@@ -2295,15 +2336,15 @@ _mm_mulhi_pu16(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c MASKMOVQ instruction.
+/// This intrinsic corresponds to the <c> MASKMOVQ </c> instruction.
///
/// \param __d
/// A 64-bit integer vector containing the values with elements to be copied.
/// \param __n
/// A 64-bit integer vector operand. The most significant bit from each 8-bit
-/// element determines whether the corresponding element in operand __d is
-/// copied. If the most significant bit of a given element is 1, the
-/// corresponding element in operand __d is copied.
+/// element determines whether the corresponding element in operand \a __d
+/// is copied. If the most significant bit of a given element is 1, the
+/// corresponding element in operand \a __d is copied.
/// \param __p
/// A pointer to a 64-bit memory location that will receive the conditionally
/// copied integer values. The address of the memory location does not have
@@ -2320,7 +2361,7 @@ _mm_maskmove_si64(__m64 __d, __m64 __n, char *__p)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PAVGB instruction.
+/// This intrinsic corresponds to the <c> PAVGB </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2339,7 +2380,7 @@ _mm_avg_pu8(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PAVGW instruction.
+/// This intrinsic corresponds to the <c> PAVGW </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2359,7 +2400,7 @@ _mm_avg_pu16(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c PSADBW instruction.
+/// This intrinsic corresponds to the <c> PSADBW </c> instruction.
///
/// \param __a
/// A 64-bit integer vector containing one of the source operands.
@@ -2374,24 +2415,42 @@ _mm_sad_pu8(__m64 __a, __m64 __b)
return (__m64)__builtin_ia32_psadbw((__v8qi)__a, (__v8qi)__b);
}
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
/// \brief Returns the contents of the MXCSR register as a 32-bit unsigned
-/// integer value. There are several groups of macros associated with this
+/// integer value.
+///
+/// There are several groups of macros associated with this
/// intrinsic, including:
-/// * For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+/// <ul>
+/// <li>
+/// For checking exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
/// _MM_EXCEPT_INEXACT. There is a convenience wrapper
/// _MM_GET_EXCEPTION_STATE().
-/// * For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+/// </li>
+/// <li>
+/// For checking exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
/// There is a convenience wrapper _MM_GET_EXCEPTION_MASK().
-/// * For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+/// </li>
+/// <li>
+/// For checking rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
/// _MM_GET_ROUNDING_MODE(x) where x is one of these macros.
-/// * For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+/// </li>
+/// <li>
+/// For checking flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
/// There is a convenience wrapper _MM_GET_FLUSH_ZERO_MODE().
-/// * For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+/// </li>
+/// <li>
+/// For checking denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
/// _MM_GET_DENORMALS_ZERO_MODE().
+/// </li>
+/// </ul>
///
/// For example, the expression below checks if an overflow exception has
/// occurred:
@@ -2402,35 +2461,45 @@ _mm_sad_pu8(__m64 __a, __m64 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VSTMXCSR / STMXCSR instruction.
+/// This intrinsic corresponds to the <c> VSTMXCSR / STMXCSR </c> instruction.
///
/// \returns A 32-bit unsigned integer containing the contents of the MXCSR
/// register.
-static __inline__ unsigned int __DEFAULT_FN_ATTRS
-_mm_getcsr(void)
-{
- return __builtin_ia32_stmxcsr();
-}
-
-/// \brief Sets the MXCSR register with the 32-bit unsigned integer value. There
-/// are several groups of macros associated with this intrinsic, including:
-/// * For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
+unsigned int _mm_getcsr(void);
+
+/// \brief Sets the MXCSR register with the 32-bit unsigned integer value.
+///
+/// There are several groups of macros associated with this intrinsic,
+/// including:
+/// <ul>
+/// <li>
+/// For setting exception states: _MM_EXCEPT_INVALID, _MM_EXCEPT_DIV_ZERO,
/// _MM_EXCEPT_DENORM, _MM_EXCEPT_OVERFLOW, _MM_EXCEPT_UNDERFLOW,
/// _MM_EXCEPT_INEXACT. There is a convenience wrapper
/// _MM_SET_EXCEPTION_STATE(x) where x is one of these macros.
-/// * For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
+/// </li>
+/// <li>
+/// For setting exception masks: _MM_MASK_UNDERFLOW, _MM_MASK_OVERFLOW,
/// _MM_MASK_INVALID, _MM_MASK_DENORM, _MM_MASK_DIV_ZERO, _MM_MASK_INEXACT.
/// There is a convenience wrapper _MM_SET_EXCEPTION_MASK(x) where x is one
/// of these macros.
-/// * For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
+/// </li>
+/// <li>
+/// For setting rounding modes: _MM_ROUND_NEAREST, _MM_ROUND_DOWN,
/// _MM_ROUND_UP, _MM_ROUND_TOWARD_ZERO. There is a convenience wrapper
/// _MM_SET_ROUNDING_MODE(x) where x is one of these macros.
-/// * For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
+/// </li>
+/// <li>
+/// For setting flush-to-zero mode: _MM_FLUSH_ZERO_ON, _MM_FLUSH_ZERO_OFF.
/// There is a convenience wrapper _MM_SET_FLUSH_ZERO_MODE(x) where x is
/// one of these macros.
-/// * For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
+/// </li>
+/// <li>
+/// For setting denormals-are-zero mode: _MM_DENORMALS_ZERO_ON,
/// _MM_DENORMALS_ZERO_OFF. There is a convenience wrapper
/// _MM_SET_DENORMALS_ZERO_MODE(x) where x is one of these macros.
+/// </li>
+/// </ul>
///
/// For example, the following expression causes subsequent floating-point
/// operations to round up:
@@ -2444,15 +2513,15 @@ _mm_getcsr(void)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VLDMXCSR / LDMXCSR instruction.
+/// This intrinsic corresponds to the <c> VLDMXCSR / LDMXCSR </c> instruction.
///
/// \param __i
/// A 32-bit unsigned integer value to be written to the MXCSR register.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_setcsr(unsigned int __i)
-{
- __builtin_ia32_ldmxcsr(__i);
-}
+void _mm_setcsr(unsigned int);
+
+#if defined(__cplusplus)
+} // extern "C"
+#endif
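
A hedged sketch tying _mm_getcsr, _mm_setcsr, and the convenience macros
above together (the example value is ours): switch the rounding mode around a
single conversion, then restore the saved control word.

#include <stdio.h>
#include <xmmintrin.h>

int main(void) {
  unsigned int saved = _mm_getcsr();
  _MM_SET_ROUNDING_MODE(_MM_ROUND_UP);
  printf("%d\n", _mm_cvtss_si32(_mm_set_ss(2.25f))); /* 3 under round-up */
  _mm_setcsr(saved); /* restore rounding and exception state */
  return 0;
}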
/// \brief Selects 4 float values from the 128-bit operands of [4 x float], as
/// specified by the immediate value operand.
@@ -2463,7 +2532,7 @@ _mm_setcsr(unsigned int __i)
/// __m128 _mm_shuffle_ps(__m128 a, __m128 b, const int mask);
/// \endcode
///
-/// This intrinsic corresponds to the \c VSHUFPS / SHUFPS instruction.
+/// This intrinsic corresponds to the <c> VSHUFPS / SHUFPS </c> instruction.
///
/// \param a
/// A 128-bit vector of [4 x float].
@@ -2471,18 +2540,23 @@ _mm_setcsr(unsigned int __i)
/// A 128-bit vector of [4 x float].
/// \param mask
/// An immediate value containing an 8-bit value specifying which elements to
-/// copy from a and b.
-/// Bits [3:0] specify the values copied from operand a.
-/// Bits [7:4] specify the values copied from operand b. The destinations
-/// within the 128-bit destination are assigned values as follows:
-/// Bits [1:0] are used to assign values to bits [31:0] in the destination.
-/// Bits [3:2] are used to assign values to bits [63:32] in the destination.
-/// Bits [5:4] are used to assign values to bits [95:64] in the destination.
-/// Bits [7:6] are used to assign values to bits [127:96] in the destination.
-/// Bit value assignments:
-/// 00: Bits [31:0] copied from the specified operand.
-/// 01: Bits [63:32] copied from the specified operand.
-/// 10: Bits [95:64] copied from the specified operand.
+///    copy from \a a and \a b. \n
+/// Bits [3:0] specify the values copied from operand \a a. \n
+/// Bits [7:4] specify the values copied from operand \a b. \n
+/// The destinations within the 128-bit destination are assigned values as
+/// follows: \n
+/// Bits [1:0] are used to assign values to bits [31:0] in the
+/// destination. \n
+/// Bits [3:2] are used to assign values to bits [63:32] in the
+/// destination. \n
+/// Bits [5:4] are used to assign values to bits [95:64] in the
+/// destination. \n
+/// Bits [7:6] are used to assign values to bits [127:96] in the
+/// destination. \n
+/// Bit value assignments: \n
+/// 00: Bits [31:0] copied from the specified operand. \n
+/// 01: Bits [63:32] copied from the specified operand. \n
+/// 10: Bits [95:64] copied from the specified operand. \n
/// 11: Bits [127:96] copied from the specified operand.
/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
@@ -2493,20 +2567,19 @@ _mm_setcsr(unsigned int __i)
4 + (((mask) >> 6) & 0x3)); })
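
The bit assignments spelled out above are usually written with the
_MM_SHUFFLE(z, y, x, w) helper defined in this header; a hedged sketch (the
function name is ours):

#include <xmmintrin.h>

__m128 lo_from_a_hi_from_b(__m128 a, __m128 b) {
  /* result = [a0, a1, b2, b3]: mask bits [3:0] select from a,
     bits [7:4] select from b */
  return _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
}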
/// \brief Unpacks the high-order (index 2,3) values from two 128-bit vectors of
-/// [4 x float] and interleaves them into a 128-bit vector of [4 x
-/// float].
+/// [4 x float] and interleaves them into a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUNPCKHPS / UNPCKHPS instruction.
+/// This intrinsic corresponds to the <c> VUNPCKHPS / UNPCKHPS </c> instruction.
///
/// \param __a
-/// A 128-bit vector of [4 x float].
-/// Bits [95:64] are written to bits [31:0] of the destination.
+/// A 128-bit vector of [4 x float]. \n
+/// Bits [95:64] are written to bits [31:0] of the destination. \n
/// Bits [127:96] are written to bits [95:64] of the destination.
/// \param __b
/// A 128-bit vector of [4 x float].
-/// Bits [95:64] are written to bits [63:32] of the destination.
+/// Bits [95:64] are written to bits [63:32] of the destination. \n
/// Bits [127:96] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -2516,20 +2589,19 @@ _mm_unpackhi_ps(__m128 __a, __m128 __b)
}
/// \brief Unpacks the low-order (index 0,1) values from two 128-bit vectors of
-/// [4 x float] and interleaves them into a 128-bit vector of [4 x
-/// float].
+/// [4 x float] and interleaves them into a 128-bit vector of [4 x float].
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUNPCKLPS / UNPCKLPS instruction.
+/// This intrinsic corresponds to the <c> VUNPCKLPS / UNPCKLPS </c> instruction.
///
/// \param __a
-/// A 128-bit vector of [4 x float].
-/// Bits [31:0] are written to bits [31:0] of the destination.
+/// A 128-bit vector of [4 x float]. \n
+/// Bits [31:0] are written to bits [31:0] of the destination. \n
/// Bits [63:32] are written to bits [95:64] of the destination.
/// \param __b
-/// A 128-bit vector of [4 x float].
-/// Bits [31:0] are written to bits [63:32] of the destination.
+/// A 128-bit vector of [4 x float]. \n
+/// Bits [31:0] are written to bits [63:32] of the destination. \n
/// Bits [63:32] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x float] containing the interleaved values.
static __inline__ __m128 __DEFAULT_FN_ATTRS
@@ -2544,7 +2616,7 @@ _mm_unpacklo_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVSS / MOVSS instruction.
+/// This intrinsic corresponds to the <c> VMOVSS / MOVSS </c> instruction.
///
/// \param __a
/// A 128-bit floating-point vector of [4 x float]. The upper 96 bits are
@@ -2565,7 +2637,7 @@ _mm_move_ss(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUNPCKHPD / UNPCKHPD instruction.
+/// This intrinsic corresponds to the <c> VUNPCKHPD / UNPCKHPD </c> instruction.
///
/// \param __a
/// A 128-bit floating-point vector of [4 x float]. The upper 64 bits are
@@ -2586,7 +2658,7 @@ _mm_movehl_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VUNPCKLPD / UNPCKLPD instruction.
+/// This intrinsic corresponds to the <c> VUNPCKLPD / UNPCKLPD </c> instruction.
///
/// \param __a
/// A 128-bit floating-point vector of [4 x float]. The lower 64 bits are
@@ -2606,7 +2678,8 @@ _mm_movelh_ps(__m128 __a, __m128 __b)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 64-bit vector of [4 x i16]. The elements of the destination are copied
@@ -2636,7 +2709,8 @@ _mm_cvtpi16_ps(__m64 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 64-bit vector of 16-bit unsigned integer values. The elements of the
@@ -2665,7 +2739,8 @@ _mm_cvtpu16_ps(__m64 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 64-bit vector of [8 x i8]. The elements of the destination are copied
@@ -2689,7 +2764,8 @@ _mm_cvtpi8_ps(__m64 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 64-bit vector of unsigned 8-bit integer values. The elements of the
@@ -2713,7 +2789,8 @@ _mm_cvtpu8_ps(__m64 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPI2PS + \c COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> CVTPI2PS + COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 64-bit vector of [2 x i32]. The lower elements of the destination are
@@ -2741,12 +2818,13 @@ _mm_cvtpi32x2_ps(__m64 __a, __m64 __b)
/// packs the results into a 64-bit integer vector of [4 x i16]. If the
/// floating-point element is NaN or infinity, or if the floating-point
/// element is greater than 0x7FFFFFFF or less than -0x8000, it is converted
-/// to 0x8000. Otherwise if the floating-point element is greater
-/// than 0x7FFF, it is converted to 0x7FFF.
+/// to 0x8000. Otherwise if the floating-point element is greater than
+/// 0x7FFF, it is converted to 0x7FFF.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPS2PI + \c COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> CVTPS2PI + COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// A 128-bit floating-point vector of [4 x float].
@@ -2770,12 +2848,13 @@ _mm_cvtps_pi16(__m128 __a)
/// [8 x i8]. The upper 32 bits of the vector are set to 0. If the
/// floating-point element is NaN or infinity, or if the floating-point
/// element is greater than 0x7FFFFFFF or less than -0x80, it is converted
-/// to 0x80. Otherwise if the floating-point element is greater
-/// than 0x7F, it is converted to 0x7F.
+/// to 0x80. Otherwise if the floating-point element is greater than 0x7F,
+/// it is converted to 0x7F.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c CVTPS2PI + \c COMPOSITE instruction.
+/// This intrinsic corresponds to the <c> CVTPS2PI + COMPOSITE </c>
+/// instruction.
///
/// \param __a
/// 128-bit floating-point vector of [4 x float].
@@ -2799,7 +2878,7 @@ _mm_cvtps_pi8(__m128 __a)
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the \c VMOVMSKPS / MOVMSKPS instruction.
+/// This intrinsic corresponds to the <c> VMOVMSKPS / MOVMSKPS </c> instruction.
///
/// \param __a
/// A 128-bit floating-point vector of [4 x float].
diff --git a/lib/Index/CommentToXML.cpp b/lib/Index/CommentToXML.cpp
index c4beef249466..ee066cc6d985 100644
--- a/lib/Index/CommentToXML.cpp
+++ b/lib/Index/CommentToXML.cpp
@@ -8,7 +8,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Index/CommentToXML.h"
-#include "SimpleFormatContext.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Comment.h"
@@ -531,12 +530,8 @@ public:
CommentASTToXMLConverter(const FullComment *FC,
SmallVectorImpl<char> &Str,
const CommandTraits &Traits,
- const SourceManager &SM,
- SimpleFormatContext &SFC,
- unsigned FUID) :
- FC(FC), Result(Str), Traits(Traits), SM(SM),
- FormatRewriterContext(SFC),
- FormatInMemoryUniqueId(FUID) { }
+ const SourceManager &SM) :
+ FC(FC), Result(Str), Traits(Traits), SM(SM) { }
// Inline content.
void visitTextComment(const TextComment *C);
@@ -574,8 +569,6 @@ private:
const CommandTraits &Traits;
const SourceManager &SM;
- SimpleFormatContext &FormatRewriterContext;
- unsigned FormatInMemoryUniqueId;
};
void getSourceTextOfDeclaration(const DeclInfo *ThisDecl,
@@ -596,21 +589,17 @@ void CommentASTToXMLConverter::formatTextOfDeclaration(
StringRef StringDecl(Declaration.c_str(), Declaration.size());
// Formatter specific code.
- // Form a unique in memory buffer name.
- SmallString<128> filename;
- filename += "xmldecl";
- filename += llvm::utostr(FormatInMemoryUniqueId);
- filename += ".xd";
- FileID ID = FormatRewriterContext.createInMemoryFile(filename, StringDecl);
- SourceLocation Start = FormatRewriterContext.Sources.getLocForStartOfFile(ID)
- .getLocWithOffset(0);
+ unsigned Offset = 0;
unsigned Length = Declaration.size();
- tooling::Replacements Replace = reformat(
- format::getLLVMStyle(), FormatRewriterContext.Sources, ID,
- CharSourceRange::getCharRange(Start, Start.getLocWithOffset(Length)));
- applyAllReplacements(Replace, FormatRewriterContext.Rewrite);
- Declaration = FormatRewriterContext.getRewrittenText(ID);
+ bool IncompleteFormat = false;
+ tooling::Replacements Replaces =
+ reformat(format::getLLVMStyle(), StringDecl,
+ tooling::Range(Offset, Length), "xmldecl.xd", &IncompleteFormat);
+ auto FormattedStringDecl = applyAllReplacements(StringDecl, Replaces);
+ if (static_cast<bool>(FormattedStringDecl)) {
+ Declaration = *FormattedStringDecl;
+ }
}
} // end unnamed namespace
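
For context, a hedged sketch of the string-based formatting path this hunk
switches to, mirroring the calls in formatTextOfDeclaration above (the
free-standing helper and its fallback are ours; the buffer name is only a
label):

#include "clang/Format/Format.h"
#include "clang/Tooling/Core/Replacement.h"
#include "llvm/ADT/StringRef.h"
#include <string>

static std::string formatDecl(llvm::StringRef Decl) {
  bool IncompleteFormat = false;
  clang::tooling::Replacements Replaces = clang::format::reformat(
      clang::format::getLLVMStyle(), Decl,
      clang::tooling::Range(0, Decl.size()), "xmldecl.xd", &IncompleteFormat);
  auto Formatted = clang::tooling::applyAllReplacements(Decl, Replaces);
  if (static_cast<bool>(Formatted))
    return *Formatted;
  return Decl.str(); /* fall back to the unformatted text */
}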
@@ -1126,7 +1115,7 @@ void CommentASTToXMLConverter::appendToResultWithCDATAEscaping(StringRef S) {
Result << "]]>";
}
-CommentToXMLConverter::CommentToXMLConverter() : FormatInMemoryUniqueId(0) {}
+CommentToXMLConverter::CommentToXMLConverter() {}
CommentToXMLConverter::~CommentToXMLConverter() {}
void CommentToXMLConverter::convertCommentToHTML(const FullComment *FC,
@@ -1148,15 +1137,7 @@ void CommentToXMLConverter::convertHTMLTagNodeToText(
void CommentToXMLConverter::convertCommentToXML(const FullComment *FC,
SmallVectorImpl<char> &XML,
const ASTContext &Context) {
- if (!FormatContext || (FormatInMemoryUniqueId % 1000) == 0) {
- // Create a new format context, or re-create it after some number of
- // iterations, so the buffers don't grow too large.
- FormatContext.reset(new SimpleFormatContext(Context.getLangOpts()));
- }
-
CommentASTToXMLConverter Converter(FC, XML, Context.getCommentCommandTraits(),
- Context.getSourceManager(), *FormatContext,
- FormatInMemoryUniqueId++);
+ Context.getSourceManager());
Converter.visit(FC);
}
-
diff --git a/lib/Index/IndexBody.cpp b/lib/Index/IndexBody.cpp
index 4908d852e896..3aa0152ec996 100644
--- a/lib/Index/IndexBody.cpp
+++ b/lib/Index/IndexBody.cpp
@@ -148,7 +148,7 @@ public:
bool VisitDesignatedInitExpr(DesignatedInitExpr *E) {
for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator())
+ if (D.isFieldDesignator() && D.getField())
return IndexCtx.handleReference(D.getField(), D.getFieldLoc(), Parent,
ParentDC, SymbolRoleSet(), {}, E);
}
@@ -276,7 +276,8 @@ public:
return true;
}
- bool TraverseLambdaCapture(LambdaExpr *LE, const LambdaCapture *C) {
+ bool TraverseLambdaCapture(LambdaExpr *LE, const LambdaCapture *C,
+ Expr *Init) {
if (C->capturesThis() || C->capturesVLAType())
return true;
@@ -293,31 +294,6 @@ public:
// Also visit things that are in the syntactic form but not the semantic one,
// for example the indices in DesignatedInitExprs.
bool TraverseInitListExpr(InitListExpr *S, DataRecursionQueue *Q = nullptr) {
-
- class SyntacticFormIndexer :
- public RecursiveASTVisitor<SyntacticFormIndexer> {
- IndexingContext &IndexCtx;
- const NamedDecl *Parent;
- const DeclContext *ParentDC;
-
- public:
- SyntacticFormIndexer(IndexingContext &indexCtx,
- const NamedDecl *Parent, const DeclContext *DC)
- : IndexCtx(indexCtx), Parent(Parent), ParentDC(DC) { }
-
- bool shouldWalkTypesOfTypeLocs() const { return false; }
-
- bool VisitDesignatedInitExpr(DesignatedInitExpr *E) {
- for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
- if (D.isFieldDesignator())
- return IndexCtx.handleReference(D.getField(), D.getFieldLoc(),
- Parent, ParentDC, SymbolRoleSet(),
- {}, E);
- }
- return true;
- }
- };
-
auto visitForm = [&](InitListExpr *Form) {
for (Stmt *SubStmt : Form->children()) {
if (!TraverseStmt(SubStmt, Q))
@@ -326,13 +302,26 @@ public:
return true;
};
+ auto visitSyntacticDesignatedInitExpr = [&](DesignatedInitExpr *E) -> bool {
+ for (DesignatedInitExpr::Designator &D : llvm::reverse(E->designators())) {
+ if (D.isFieldDesignator())
+ return IndexCtx.handleReference(D.getField(), D.getFieldLoc(),
+ Parent, ParentDC, SymbolRoleSet(),
+ {}, E);
+ }
+ return true;
+ };
+
InitListExpr *SemaForm = S->isSemanticForm() ? S : S->getSemanticForm();
InitListExpr *SyntaxForm = S->isSemanticForm() ? S->getSyntacticForm() : S;
if (SemaForm) {
// Visit things present in syntactic form but not the semantic form.
if (SyntaxForm) {
- SyntacticFormIndexer(IndexCtx, Parent, ParentDC).TraverseStmt(SyntaxForm);
+ for (Expr *init : SyntaxForm->inits()) {
+ if (auto *DIE = dyn_cast<DesignatedInitExpr>(init))
+ visitSyntacticDesignatedInitExpr(DIE);
+ }
}
return visitForm(SemaForm);
}
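The nested SyntacticFormIndexer is gone; the syntactic form's initializers are now walked directly, because field designators only survive there. For illustration (hypothetical types):

    // In the semantic InitListExpr the designators below are lowered to
    // positional initializers, so the member references '.x' and '.y' are
    // only visible -- and therefore only indexable -- in the syntactic form.
    struct Point { int x, y; };
    Point P = { .x = 1, .y = 2 };  // C99-style designators; an extension in
                                   // pre-C++20 C++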
diff --git a/lib/Index/IndexDecl.cpp b/lib/Index/IndexDecl.cpp
index eb3e15114735..1225391dc2a6 100644
--- a/lib/Index/IndexDecl.cpp
+++ b/lib/Index/IndexDecl.cpp
@@ -75,8 +75,21 @@ public:
}
}
- bool handleObjCMethod(const ObjCMethodDecl *D) {
- if (!IndexCtx.handleDecl(D, (unsigned)SymbolRole::Dynamic))
+ bool handleObjCMethod(const ObjCMethodDecl *D,
+ const ObjCPropertyDecl *AssociatedProp = nullptr) {
+ SmallVector<SymbolRelation, 4> Relations;
+ SmallVector<const ObjCMethodDecl *, 4> Overridden;
+
+ D->getOverriddenMethods(Overridden);
+ for (const auto *O : Overridden) {
+   Relations.emplace_back((unsigned)SymbolRole::RelationOverrideOf, O);
+ }
+ if (AssociatedProp)
+ Relations.emplace_back((unsigned)SymbolRole::RelationAccessorOf,
+ AssociatedProp);
+
+ if (!IndexCtx.handleDecl(D, (unsigned)SymbolRole::Dynamic, Relations))
return false;
IndexCtx.indexTypeSourceInfo(D->getReturnTypeSourceInfo(), D);
for (const auto *I : D->parameters())
@@ -269,9 +282,18 @@ public:
}
bool VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
- if (!IndexCtx.handleDecl(D))
- return false;
- IndexCtx.indexDeclContext(D);
+ const ObjCInterfaceDecl *C = D->getClassInterface();
+ if (C)
+ TRY_TO(IndexCtx.handleReference(C, D->getLocation(), D, D,
+ SymbolRoleSet(), SymbolRelation{
+ (unsigned)SymbolRole::RelationExtendedBy, D
+ }));
+ SourceLocation CategoryLoc = D->getCategoryNameLoc();
+ if (!CategoryLoc.isValid())
+ CategoryLoc = D->getLocation();
+ TRY_TO(IndexCtx.handleDecl(D, CategoryLoc));
+ TRY_TO(handleReferencedProtocols(D->getReferencedProtocols(), D));
+ TRY_TO(IndexCtx.indexDeclContext(D));
return true;
}
@@ -279,8 +301,14 @@ public:
const ObjCCategoryDecl *Cat = D->getCategoryDecl();
if (!Cat)
return true;
-
- if (!IndexCtx.handleDecl(D))
+ const ObjCInterfaceDecl *C = D->getClassInterface();
+ if (C)
+ TRY_TO(IndexCtx.handleReference(C, D->getLocation(), D, D,
+ SymbolRoleSet()));
+ SourceLocation CategoryLoc = D->getCategoryNameLoc();
+ if (!CategoryLoc.isValid())
+ CategoryLoc = D->getLocation();
+ if (!IndexCtx.handleDecl(D, CategoryLoc))
return false;
IndexCtx.indexDeclContext(D);
return true;
@@ -299,10 +327,10 @@ public:
bool VisitObjCPropertyDecl(const ObjCPropertyDecl *D) {
if (ObjCMethodDecl *MD = D->getGetterMethodDecl())
if (MD->getLexicalDeclContext() == D->getLexicalDeclContext())
- handleObjCMethod(MD);
+ handleObjCMethod(MD, D);
if (ObjCMethodDecl *MD = D->getSetterMethodDecl())
if (MD->getLexicalDeclContext() == D->getLexicalDeclContext())
- handleObjCMethod(MD);
+ handleObjCMethod(MD, D);
if (!IndexCtx.handleDecl(D))
return false;
IndexCtx.indexTypeSourceInfo(D->getTypeSourceInfo(), D);
diff --git a/lib/Index/IndexSymbol.cpp b/lib/Index/IndexSymbol.cpp
index 13a845230072..b2342453a916 100644
--- a/lib/Index/IndexSymbol.cpp
+++ b/lib/Index/IndexSymbol.cpp
@@ -40,12 +40,12 @@ static bool isUnitTest(const ObjCMethodDecl *D) {
return isUnitTestCase(D->getClassInterface());
}
-static void checkForIBOutlets(const Decl *D, SymbolSubKindSet &SubKindSet) {
+static void checkForIBOutlets(const Decl *D, SymbolPropertySet &PropSet) {
if (D->hasAttr<IBOutletAttr>()) {
- SubKindSet |= (unsigned)SymbolSubKind::IBAnnotated;
+ PropSet |= (unsigned)SymbolProperty::IBAnnotated;
} else if (D->hasAttr<IBOutletCollectionAttr>()) {
- SubKindSet |= (unsigned)SymbolSubKind::IBAnnotated;
- SubKindSet |= (unsigned)SymbolSubKind::IBOutletCollection;
+ PropSet |= (unsigned)SymbolProperty::IBAnnotated;
+ PropSet |= (unsigned)SymbolProperty::IBOutletCollection;
}
}
@@ -53,7 +53,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
assert(D);
SymbolInfo Info;
Info.Kind = SymbolKind::Unknown;
- Info.SubKinds = SymbolSubKindSet();
+ Info.Properties = SymbolPropertySet();
Info.Lang = SymbolLanguage::C;
if (const TagDecl *TD = dyn_cast<TagDecl>(D)) {
@@ -74,16 +74,40 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Kind = SymbolKind::Enum; break;
}
- if (const CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(D))
- if (!CXXRec->isCLike())
+ if (const CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(D)) {
+ if (!CXXRec->isCLike()) {
Info.Lang = SymbolLanguage::CXX;
+ if (CXXRec->getDescribedClassTemplate()) {
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
+ }
+ }
+ }
if (isa<ClassTemplatePartialSpecializationDecl>(D)) {
- Info.SubKinds |= (unsigned)SymbolSubKind::Generic;
- Info.SubKinds |= (unsigned)SymbolSubKind::TemplatePartialSpecialization;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::TemplatePartialSpecialization;
} else if (isa<ClassTemplateSpecializationDecl>(D)) {
- Info.SubKinds |= (unsigned)SymbolSubKind::Generic;
- Info.SubKinds |= (unsigned)SymbolSubKind::TemplateSpecialization;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::TemplateSpecialization;
+ }
+
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ Info.Kind = SymbolKind::Variable;
+ if (isa<CXXRecordDecl>(D->getDeclContext())) {
+ Info.Kind = SymbolKind::StaticProperty;
+ Info.Lang = SymbolLanguage::CXX;
+ }
+ if (isa<VarTemplatePartialSpecializationDecl>(D)) {
+ Info.Lang = SymbolLanguage::CXX;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::TemplatePartialSpecialization;
+ } else if (isa<VarTemplateSpecializationDecl>(D)) {
+ Info.Lang = SymbolLanguage::CXX;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::TemplateSpecialization;
+ } else if (VD->getDescribedVarTemplate()) {
+ Info.Lang = SymbolLanguage::CXX;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
}
} else {
@@ -96,16 +120,6 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
case Decl::Function:
Info.Kind = SymbolKind::Function;
break;
- case Decl::ParmVar:
- Info.Kind = SymbolKind::Variable;
- break;
- case Decl::Var:
- Info.Kind = SymbolKind::Variable;
- if (isa<CXXRecordDecl>(D->getDeclContext())) {
- Info.Kind = SymbolKind::StaticProperty;
- Info.Lang = SymbolLanguage::CXX;
- }
- break;
case Decl::Field:
Info.Kind = SymbolKind::Field;
if (const CXXRecordDecl *
@@ -124,7 +138,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (!ClsD)
ClsD = cast<ObjCImplementationDecl>(D)->getClassInterface();
if (isUnitTestCase(ClsD))
- Info.SubKinds |= (unsigned)SymbolSubKind::UnitTest;
+ Info.Properties |= (unsigned)SymbolProperty::UnitTest;
break;
}
case Decl::ObjCProtocol:
@@ -143,19 +157,23 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Kind = SymbolKind::ClassMethod;
Info.Lang = SymbolLanguage::ObjC;
if (isUnitTest(cast<ObjCMethodDecl>(D)))
- Info.SubKinds |= (unsigned)SymbolSubKind::UnitTest;
+ Info.Properties |= (unsigned)SymbolProperty::UnitTest;
if (D->hasAttr<IBActionAttr>())
- Info.SubKinds |= (unsigned)SymbolSubKind::IBAnnotated;
+ Info.Properties |= (unsigned)SymbolProperty::IBAnnotated;
break;
case Decl::ObjCProperty:
Info.Kind = SymbolKind::InstanceProperty;
Info.Lang = SymbolLanguage::ObjC;
- checkForIBOutlets(D, Info.SubKinds);
+ checkForIBOutlets(D, Info.Properties);
+ if (auto *Annot = D->getAttr<AnnotateAttr>()) {
+ if (Annot->getAnnotation() == "gk_inspectable")
+ Info.Properties |= (unsigned)SymbolProperty::GKInspectable;
+ }
break;
case Decl::ObjCIvar:
Info.Kind = SymbolKind::Field;
Info.Lang = SymbolLanguage::ObjC;
- checkForIBOutlets(D, Info.SubKinds);
+ checkForIBOutlets(D, Info.Properties);
break;
case Decl::Namespace:
Info.Kind = SymbolKind::Namespace;
@@ -188,12 +206,12 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
}
case Decl::ClassTemplate:
Info.Kind = SymbolKind::Class;
- Info.SubKinds |= (unsigned)SymbolSubKind::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
Info.Lang = SymbolLanguage::CXX;
break;
case Decl::FunctionTemplate:
Info.Kind = SymbolKind::Function;
- Info.SubKinds |= (unsigned)SymbolSubKind::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
Info.Lang = SymbolLanguage::CXX;
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(
cast<FunctionTemplateDecl>(D)->getTemplatedDecl())) {
@@ -214,7 +232,7 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
case Decl::TypeAliasTemplate:
Info.Kind = SymbolKind::TypeAlias;
Info.Lang = SymbolLanguage::CXX;
- Info.SubKinds |= (unsigned)SymbolSubKind::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
break;
case Decl::TypeAlias:
Info.Kind = SymbolKind::TypeAlias;
@@ -231,12 +249,12 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->getTemplatedKind() ==
FunctionDecl::TK_FunctionTemplateSpecialization) {
- Info.SubKinds |= (unsigned)SymbolSubKind::Generic;
- Info.SubKinds |= (unsigned)SymbolSubKind::TemplateSpecialization;
+ Info.Properties |= (unsigned)SymbolProperty::Generic;
+ Info.Properties |= (unsigned)SymbolProperty::TemplateSpecialization;
}
}
- if (Info.SubKinds & (unsigned)SymbolSubKind::Generic)
+ if (Info.Properties & (unsigned)SymbolProperty::Generic)
Info.Lang = SymbolLanguage::CXX;
return Info;
@@ -262,6 +280,8 @@ void index::applyForEachSymbolRole(SymbolRoleSet Roles,
APPLY_FOR_ROLE(RelationOverrideOf);
APPLY_FOR_ROLE(RelationReceivedBy);
APPLY_FOR_ROLE(RelationCalledBy);
+ APPLY_FOR_ROLE(RelationExtendedBy);
+ APPLY_FOR_ROLE(RelationAccessorOf);
#undef APPLY_FOR_ROLE
}
@@ -288,6 +308,8 @@ void index::printSymbolRoles(SymbolRoleSet Roles, raw_ostream &OS) {
case SymbolRole::RelationOverrideOf: OS << "RelOver"; break;
case SymbolRole::RelationReceivedBy: OS << "RelRec"; break;
case SymbolRole::RelationCalledBy: OS << "RelCall"; break;
+ case SymbolRole::RelationExtendedBy: OS << "RelExt"; break;
+ case SymbolRole::RelationAccessorOf: OS << "RelAcc"; break;
}
});
}
@@ -350,36 +372,38 @@ StringRef index::getSymbolLanguageString(SymbolLanguage K) {
llvm_unreachable("invalid symbol language kind");
}
-void index::applyForEachSymbolSubKind(SymbolSubKindSet SubKinds,
- llvm::function_ref<void(SymbolSubKind)> Fn) {
-#define APPLY_FOR_SUBKIND(K) \
- if (SubKinds & (unsigned)SymbolSubKind::K) \
- Fn(SymbolSubKind::K)
+void index::applyForEachSymbolProperty(SymbolPropertySet Props,
+ llvm::function_ref<void(SymbolProperty)> Fn) {
+#define APPLY_FOR_PROPERTY(K) \
+ if (Props & (unsigned)SymbolProperty::K) \
+ Fn(SymbolProperty::K)
- APPLY_FOR_SUBKIND(Generic);
- APPLY_FOR_SUBKIND(TemplatePartialSpecialization);
- APPLY_FOR_SUBKIND(TemplateSpecialization);
- APPLY_FOR_SUBKIND(UnitTest);
- APPLY_FOR_SUBKIND(IBAnnotated);
- APPLY_FOR_SUBKIND(IBOutletCollection);
+ APPLY_FOR_PROPERTY(Generic);
+ APPLY_FOR_PROPERTY(TemplatePartialSpecialization);
+ APPLY_FOR_PROPERTY(TemplateSpecialization);
+ APPLY_FOR_PROPERTY(UnitTest);
+ APPLY_FOR_PROPERTY(IBAnnotated);
+ APPLY_FOR_PROPERTY(IBOutletCollection);
+ APPLY_FOR_PROPERTY(GKInspectable);
-#undef APPLY_FOR_SUBKIND
+#undef APPLY_FOR_PROPERTY
}
-void index::printSymbolSubKinds(SymbolSubKindSet SubKinds, raw_ostream &OS) {
+void index::printSymbolProperties(SymbolPropertySet Props, raw_ostream &OS) {
bool VisitedOnce = false;
- applyForEachSymbolSubKind(SubKinds, [&](SymbolSubKind SubKind) {
+ applyForEachSymbolProperty(Props, [&](SymbolProperty Prop) {
if (VisitedOnce)
OS << ',';
else
VisitedOnce = true;
- switch (SubKind) {
- case SymbolSubKind::Generic: OS << "Gen"; break;
- case SymbolSubKind::TemplatePartialSpecialization: OS << "TPS"; break;
- case SymbolSubKind::TemplateSpecialization: OS << "TS"; break;
- case SymbolSubKind::UnitTest: OS << "test"; break;
- case SymbolSubKind::IBAnnotated: OS << "IB"; break;
- case SymbolSubKind::IBOutletCollection: OS << "IBColl"; break;
+ switch (Prop) {
+ case SymbolProperty::Generic: OS << "Gen"; break;
+ case SymbolProperty::TemplatePartialSpecialization: OS << "TPS"; break;
+ case SymbolProperty::TemplateSpecialization: OS << "TS"; break;
+ case SymbolProperty::UnitTest: OS << "test"; break;
+ case SymbolProperty::IBAnnotated: OS << "IB"; break;
+ case SymbolProperty::IBOutletCollection: OS << "IBColl"; break;
+ case SymbolProperty::GKInspectable: OS << "GKI"; break;
}
});
}
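The VarDecl handling added earlier in this file distinguishes several flavors that previously all mapped to a plain Variable. A sketch of what each classification corresponds to (the declarations are illustrative, with the expected kind and properties in comments):

    struct Widget {
      static int Count;                // StaticProperty, language C++
    };
    int GlobalTotal;                   // Variable
    template <class T> T Zero = T();   // Variable, Generic
    template <class T> T *Zero<T *> = nullptr;  // + TemplatePartialSpecialization
    template <> int Zero<int> = 0;               // + TemplateSpecialization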
diff --git a/lib/Index/IndexingContext.cpp b/lib/Index/IndexingContext.cpp
index bcc367c6626d..e623a495b47b 100644
--- a/lib/Index/IndexingContext.cpp
+++ b/lib/Index/IndexingContext.cpp
@@ -130,9 +130,10 @@ bool IndexingContext::isTemplateImplicitInstantiation(const Decl *D) {
if (const ClassTemplateSpecializationDecl *
SD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
TKind = SD->getSpecializationKind();
- }
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
TKind = FD->getTemplateSpecializationKind();
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ TKind = VD->getTemplateSpecializationKind();
}
switch (TKind) {
case TSK_Undeclared:
@@ -164,9 +165,10 @@ static const Decl *adjustTemplateImplicitInstantiation(const Decl *D) {
if (const ClassTemplateSpecializationDecl *
SD = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
return SD->getTemplateInstantiationPattern();
- }
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
return FD->getTemplateInstantiationPattern();
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ return VD->getTemplateInstantiationPattern();
}
return nullptr;
}
@@ -290,19 +292,9 @@ bool IndexingContext::handleDeclOccurrence(const Decl *D, SourceLocation Loc,
Roles |= (unsigned)SymbolRole::Declaration;
D = getCanonicalDecl(D);
- if (D->isImplicit() && !isa<ObjCMethodDecl>(D) &&
- !(isa<FunctionDecl>(D) && cast<FunctionDecl>(D)->getBuiltinID())) {
- // operator new declarations will link to the implicit one as canonical.
- return true;
- }
Parent = adjustParent(Parent);
if (Parent)
Parent = getCanonicalDecl(Parent);
- assert((!Parent || !Parent->isImplicit() ||
- (isa<FunctionDecl>(Parent) &&
- cast<FunctionDecl>(Parent)->getBuiltinID()) ||
- isa<ObjCInterfaceDecl>(Parent) || isa<ObjCMethodDecl>(Parent)) &&
- "unexpected implicit parent!");
SmallVector<SymbolRelation, 6> FinalRelations;
FinalRelations.reserve(Relations.size()+1);
diff --git a/lib/Index/USRGeneration.cpp b/lib/Index/USRGeneration.cpp
index 30f1add249b1..58f61c3c65b7 100644
--- a/lib/Index/USRGeneration.cpp
+++ b/lib/Index/USRGeneration.cpp
@@ -12,7 +12,6 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/Lex/PreprocessingRecord.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
@@ -174,8 +173,11 @@ bool USRGenerator::ShouldGenerateLocation(const NamedDecl *D) {
return false;
if (D->getParentFunctionOrMethod())
return true;
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ return false;
const SourceManager &SM = Context->getSourceManager();
- return !SM.isInSystemHeader(D->getLocation());
+ return !SM.isInSystemHeader(Loc);
}
void USRGenerator::VisitDeclContext(const DeclContext *DC) {
@@ -284,6 +286,15 @@ void USRGenerator::VisitVarDecl(const VarDecl *D) {
VisitDeclContext(D->getDeclContext());
+ if (VarTemplateDecl *VarTmpl = D->getDescribedVarTemplate()) {
+ Out << "@VT";
+ VisitTemplateParameterList(VarTmpl->getTemplateParameters());
+ } else if (const VarTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<VarTemplatePartialSpecializationDecl>(D)) {
+ Out << "@VP";
+ VisitTemplateParameterList(PartialSpec->getTemplateParameters());
+ }
+
// Variables always have simple names.
StringRef s = D->getName();
@@ -295,6 +306,17 @@ void USRGenerator::VisitVarDecl(const VarDecl *D) {
IgnoreResults = true;
else
Out << '@' << s;
+
+ // For a template specialization, mangle the template arguments.
+ if (const VarTemplateSpecializationDecl *Spec
+ = dyn_cast<VarTemplateSpecializationDecl>(D)) {
+ const TemplateArgumentList &Args = Spec->getTemplateInstantiationArgs();
+ Out << '>';
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ Out << '#';
+ VisitTemplateArgument(Args.get(I));
+ }
+ }
}
void USRGenerator::VisitNonTypeTemplateParmDecl(
@@ -875,9 +897,11 @@ void clang::index::generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS) {
bool clang::index::generateUSRForDecl(const Decl *D,
SmallVectorImpl<char> &Buf) {
- // Don't generate USRs for things with invalid locations.
- if (!D || D->getLocStart().isInvalid())
+ if (!D)
return true;
+ // We don't ignore decls with invalid source locations. Implicit decls, like
+ // C++'s operator new function, can have invalid locations but it is fine to
+ // create USRs that can identify them.
USRGenerator UG(&D->getASTContext(), Buf);
UG.Visit(D);
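With the VisitVarDecl additions above, variable templates now receive distinguishable USRs. Roughly which declaration triggers which marker (the exact USR spelling is internal and version-dependent, so treat the annotations as an assumption):

    template <class T> T Pi = T(3.14159);     // '@VT' + template parameters
    template <class T> T *Pi<T *> = nullptr;  // '@VP' for the partial
                                               // specialization
    template <> float Pi<float> = 3.14159f;   // name, then '>' and '#'-
                                               // separated template arguments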
diff --git a/lib/Lex/HeaderMap.cpp b/lib/Lex/HeaderMap.cpp
index 4cace5b00245..24a14b6cdb57 100644
--- a/lib/Lex/HeaderMap.cpp
+++ b/lib/Lex/HeaderMap.cpp
@@ -106,7 +106,7 @@ bool HeaderMapImpl::checkHeader(const llvm::MemoryBuffer &File,
/// getFileName - Return the filename of the headermap.
-const char *HeaderMapImpl::getFileName() const {
+StringRef HeaderMapImpl::getFileName() const {
return FileBuffer->getBufferIdentifier();
}
diff --git a/lib/Lex/HeaderSearch.cpp b/lib/Lex/HeaderSearch.cpp
index e5cc30e41c57..b5228fc6c8cb 100644
--- a/lib/Lex/HeaderSearch.cpp
+++ b/lib/Lex/HeaderSearch.cpp
@@ -26,7 +26,6 @@
#include "llvm/Support/Capacity.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/raw_ostream.h"
#include <cstdio>
#include <utility>
#if defined(LLVM_ON_UNIX)
@@ -37,9 +36,12 @@ using namespace clang;
const IdentifierInfo *
HeaderFileInfo::getControllingMacro(ExternalPreprocessorSource *External) {
if (ControllingMacro) {
- if (ControllingMacro->isOutOfDate())
+ if (ControllingMacro->isOutOfDate()) {
+ assert(External && "We must have an external source if we have a "
+ "controlling macro that is out of date.");
External->updateOutOfDateIdentifier(
*const_cast<IdentifierInfo *>(ControllingMacro));
+ }
return ControllingMacro;
}
@@ -119,14 +121,39 @@ const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
return nullptr;
}
+/// \brief Get filenames for all registered header maps.
+void HeaderSearch::getHeaderMapFileNames(
+ SmallVectorImpl<std::string> &Names) const {
+ for (auto &HM : HeaderMaps)
+ Names.push_back(HM.first->getName());
+}
+
std::string HeaderSearch::getModuleFileName(Module *Module) {
const FileEntry *ModuleMap =
getModuleMap().getModuleMapFileForUniquing(Module);
- return getModuleFileName(Module->Name, ModuleMap->getName());
+ return getModuleFileName(Module->Name, ModuleMap->getName(),
+ /*UsePrebuiltPath*/false);
}
std::string HeaderSearch::getModuleFileName(StringRef ModuleName,
- StringRef ModuleMapPath) {
+ StringRef ModuleMapPath,
+ bool UsePrebuiltPath) {
+ if (UsePrebuiltPath) {
+ if (HSOpts->PrebuiltModulePaths.empty())
+ return std::string();
+
+ // Go through each prebuilt module path and try to find the .pcm file.
+ for (const std::string &Dir : HSOpts->PrebuiltModulePaths) {
+ SmallString<256> Result(Dir);
+ llvm::sys::fs::make_absolute(Result);
+
+ llvm::sys::path::append(Result, ModuleName + ".pcm");
+ if (getFileMgr().getFile(Result.str()))
+ return Result.str().str();
+ }
+ return std::string();
+ }
+
// If we don't have a module cache path or aren't supposed to use one, we
// can't do anything.
if (getModuleCachePath().empty())
@@ -167,16 +194,36 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
Module *Module = ModMap.findModule(ModuleName);
if (Module || !AllowSearch || !HSOpts->ImplicitModuleMaps)
return Module;
-
+
+ StringRef SearchName = ModuleName;
+ Module = lookupModule(ModuleName, SearchName);
+
+ // The facility for "private modules" -- adjacent, optional module maps named
+ // module.private.modulemap that are supposed to define private submodules --
+ // is sometimes misused by frameworks that name their associated private
+ // module FooPrivate rather than defining it as a submodule named Foo.Private
+ // as intended. Here we compensate for such cases by looking in directories
+ // named Foo.framework when we previously looked for, and failed to find, a
+ // FooPrivate.framework.
+ if (!Module && SearchName.consume_back("Private"))
+ Module = lookupModule(ModuleName, SearchName);
+ return Module;
+}
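The fallback relies on llvm::StringRef::consume_back, which removes a suffix in place and reports whether it was present:

    llvm::StringRef SearchName = "FooPrivate";
    if (SearchName.consume_back("Private")) {
      // SearchName is now "Foo"; the framework directory search is retried
      // under that name while the module keeps its FooPrivate spelling.
    }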
+
+Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName) {
+ Module *Module = nullptr;
+
// Look through the various header search paths to load any available module
// maps, searching for a module map that describes this module.
for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
if (SearchDirs[Idx].isFramework()) {
- // Search for or infer a module map for a framework.
+ // Search for or infer a module map for a framework. Here we use
+ // SearchName rather than ModuleName, to permit finding private modules
+ // named FooPrivate in buggy frameworks named Foo.
SmallString<128> FrameworkDirName;
FrameworkDirName += SearchDirs[Idx].getFrameworkDir()->getName();
- llvm::sys::path::append(FrameworkDirName, ModuleName + ".framework");
- if (const DirectoryEntry *FrameworkDir
+ llvm::sys::path::append(FrameworkDirName, SearchName + ".framework");
+ if (const DirectoryEntry *FrameworkDir
= FileMgr.getDirectory(FrameworkDirName)) {
bool IsSystem
= SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
@@ -240,7 +287,7 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
/// getName - Return the directory or filename corresponding to this lookup
/// object.
-const char *DirectoryLookup::getName() const {
+StringRef DirectoryLookup::getName() const {
if (isNormalDir())
return getDir()->getName();
if (isFramework())
@@ -396,6 +443,12 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
return TopFrameworkDir;
}
+static bool needModuleLookup(Module *RequestingModule,
+ bool HasSuggestedModule) {
+ return HasSuggestedModule ||
+ (RequestingModule && RequestingModule->NoUndeclaredIncludes);
+}
+
/// DoFrameworkLookup - Do a lookup of the specified file in the current
/// DirectoryLookup, which is a framework directory.
const FileEntry *DirectoryLookup::DoFrameworkLookup(
@@ -491,7 +544,7 @@ const FileEntry *DirectoryLookup::DoFrameworkLookup(
}
// If we found the header and are allowed to suggest a module, do so now.
- if (FE && SuggestedModule) {
+ if (FE && needModuleLookup(RequestingModule, SuggestedModule)) {
// Find the framework in which this header occurs.
StringRef FrameworkPath = FE->getDir()->getName();
bool FoundFramework = false;
@@ -830,17 +883,19 @@ LookupSubframeworkHeader(StringRef Filename,
if (SlashPos == StringRef::npos) return nullptr;
// Look up the base framework name of the ContextFileEnt.
- const char *ContextName = ContextFileEnt->getName();
+ StringRef ContextName = ContextFileEnt->getName();
// If the context info wasn't a framework, it can't be a subframework.
const unsigned DotFrameworkLen = 10;
- const char *FrameworkPos = strstr(ContextName, ".framework");
- if (FrameworkPos == nullptr ||
- (FrameworkPos[DotFrameworkLen] != '/' &&
- FrameworkPos[DotFrameworkLen] != '\\'))
+ auto FrameworkPos = ContextName.find(".framework");
+ if (FrameworkPos == StringRef::npos ||
+ (ContextName[FrameworkPos + DotFrameworkLen] != '/' &&
+ ContextName[FrameworkPos + DotFrameworkLen] != '\\'))
return nullptr;
- SmallString<1024> FrameworkName(ContextName, FrameworkPos+DotFrameworkLen+1);
+ SmallString<1024> FrameworkName(ContextName.data(), ContextName.data() +
+ FrameworkPos +
+ DotFrameworkLen + 1);
// Append Frameworks/HIToolbox.framework/
FrameworkName += "Frameworks/";
@@ -1139,22 +1194,45 @@ bool HeaderSearch::hasModuleMap(StringRef FileName,
}
ModuleMap::KnownHeader
-HeaderSearch::findModuleForHeader(const FileEntry *File) const {
+HeaderSearch::findModuleForHeader(const FileEntry *File,
+ bool AllowTextual) const {
if (ExternalSource) {
// Make sure the external source has handled header info about this file,
// which includes whether the file is part of a module.
(void)getExistingFileInfo(File);
}
- return ModMap.findModuleForHeader(File);
+ return ModMap.findModuleForHeader(File, AllowTextual);
+}
+
+static bool suggestModule(HeaderSearch &HS, const FileEntry *File,
+ Module *RequestingModule,
+ ModuleMap::KnownHeader *SuggestedModule) {
+ ModuleMap::KnownHeader Module =
+ HS.findModuleForHeader(File, /*AllowTextual*/true);
+ if (SuggestedModule)
+ *SuggestedModule = (Module.getRole() & ModuleMap::TextualHeader)
+ ? ModuleMap::KnownHeader()
+ : Module;
+
+ // If this module specifies [no_undeclared_includes], we cannot find any
+ // file that's in a non-dependency module.
+ if (RequestingModule && Module && RequestingModule->NoUndeclaredIncludes) {
+ HS.getModuleMap().resolveUses(RequestingModule, /*Complain*/false);
+ if (!RequestingModule->directlyUses(Module.getModule())) {
+ return false;
+ }
+ }
+
+ return true;
}
bool HeaderSearch::findUsableModuleForHeader(
const FileEntry *File, const DirectoryEntry *Root, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemHeaderDir) {
- if (File && SuggestedModule) {
+ if (File && needModuleLookup(RequestingModule, SuggestedModule)) {
// If there is a module that corresponds to this header, suggest it.
hasModuleMap(File->getName(), Root, IsSystemHeaderDir);
- *SuggestedModule = findModuleForHeader(File);
+ return suggestModule(*this, File, RequestingModule, SuggestedModule);
}
return true;
}
@@ -1163,7 +1241,7 @@ bool HeaderSearch::findUsableModuleForFrameworkHeader(
const FileEntry *File, StringRef FrameworkName, Module *RequestingModule,
ModuleMap::KnownHeader *SuggestedModule, bool IsSystemFramework) {
// If we're supposed to suggest a module, look for one now.
- if (SuggestedModule) {
+ if (needModuleLookup(RequestingModule, SuggestedModule)) {
// Find the top-level framework based on this framework.
SmallVector<std::string, 4> SubmodulePath;
const DirectoryEntry *TopFrameworkDir
@@ -1180,7 +1258,7 @@ bool HeaderSearch::findUsableModuleForFrameworkHeader(
// important so that we're consistent about whether this header
// corresponds to a module. Possibly we should lock down framework modules
// so that this is not possible.
- *SuggestedModule = findModuleForHeader(File);
+ return suggestModule(*this, File, RequestingModule, SuggestedModule);
}
return true;
}
@@ -1432,7 +1510,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
// FIXME: We assume that the path name currently cached in the FileEntry is
// the most appropriate one for this analysis (and that it's spelled the same
// way as the corresponding header search path).
- const char *Name = File->getName();
+ StringRef Name = File->getName();
unsigned BestPrefixLength = 0;
unsigned BestSearchDir;
@@ -1442,7 +1520,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
if (!SearchDirs[I].isNormalDir())
continue;
- const char *Dir = SearchDirs[I].getDir()->getName();
+ StringRef Dir = SearchDirs[I].getDir()->getName();
for (auto NI = llvm::sys::path::begin(Name),
NE = llvm::sys::path::end(Name),
DI = llvm::sys::path::begin(Dir),
@@ -1475,5 +1553,5 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(const FileEntry *File,
if (IsSystem)
*IsSystem = BestPrefixLength ? BestSearchDir >= SystemDirIdx : false;
- return Name + BestPrefixLength;
+ return Name.drop_front(BestPrefixLength);
}
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 9c2a0163acea..6025a6675125 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -14,18 +14,27 @@
#include "clang/Lex/Lexer.h"
#include "UnicodeCharSets.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/UnicodeCharRanges.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
#include <cstring>
+#include <string>
+#include <tuple>
+#include <utility>
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -45,7 +54,6 @@ tok::ObjCKeywordKind Token::getObjCKeywordID() const {
return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
}
-
//===----------------------------------------------------------------------===//
// Lexer Class Implementation
//===----------------------------------------------------------------------===//
@@ -196,7 +204,6 @@ Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
return L;
}
-
/// Stringify - Convert the specified string into a C string, with surrounding
/// ""'s, and with escaped \ and " characters.
std::string Lexer::Stringify(StringRef Str, bool Charify) {
@@ -398,7 +405,6 @@ unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
return getSpellingSlow(Tok, TokStart, LangOpts, const_cast<char*>(Buffer));
}
-
/// MeasureTokenLength - Relex the token at the specified location and return
/// its length in bytes in the input file. If the token needs cleaning (e.g.
/// includes a trigraph or an escaped newline) then this count includes bytes
@@ -526,13 +532,15 @@ SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
}
namespace {
+
enum PreambleDirectiveKind {
PDK_Skipped,
PDK_StartIf,
PDK_EndIf,
PDK_Unknown
};
-}
+
+} // end anonymous namespace
std::pair<unsigned, bool> Lexer::ComputePreamble(StringRef Buffer,
const LangOptions &LangOpts,
@@ -694,7 +702,6 @@ std::pair<unsigned, bool> Lexer::ComputePreamble(StringRef Buffer,
: TheTok.isAtStartOfLine());
}
-
/// AdvanceToTokenCharacter - Given a location that specifies the start of a
/// token, return a new location that specifies a character within the token.
SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
@@ -961,7 +968,7 @@ StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
assert(Loc.isMacroID() && "Only reasonable to call this on macros");
// Find the location of the immediate macro expansion.
- while (1) {
+ while (true) {
FileID FID = SM.getFileID(Loc);
const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
@@ -1031,7 +1038,6 @@ bool Lexer::isIdentifierBodyChar(char c, const LangOptions &LangOpts) {
return isIdentifierBody(c, LangOpts.DollarIdents);
}
-
//===----------------------------------------------------------------------===//
// Diagnostics forwarding code.
//===----------------------------------------------------------------------===//
@@ -1157,7 +1163,7 @@ unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
/// them), skip over them and return the first non-escaped-newline found,
/// otherwise return P.
const char *Lexer::SkipEscapedNewLines(const char *P) {
- while (1) {
+ while (true) {
const char *AfterEscape;
if (*P == '\\') {
AfterEscape = P+1;
@@ -1310,7 +1316,6 @@ Slash:
return *Ptr;
}
-
/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
/// and that we have already incremented Ptr by Size bytes.
@@ -1480,13 +1485,13 @@ bool Lexer::tryConsumeIdentifierUCN(const char *&CurPtr, unsigned Size,
bool Lexer::tryConsumeIdentifierUTF8Char(const char *&CurPtr) {
const char *UnicodePtr = CurPtr;
- UTF32 CodePoint;
- ConversionResult Result =
- llvm::convertUTF8Sequence((const UTF8 **)&UnicodePtr,
- (const UTF8 *)BufferEnd,
+ llvm::UTF32 CodePoint;
+ llvm::ConversionResult Result =
+ llvm::convertUTF8Sequence((const llvm::UTF8 **)&UnicodePtr,
+ (const llvm::UTF8 *)BufferEnd,
&CodePoint,
- strictConversion);
- if (Result != conversionOK ||
+ llvm::strictConversion);
+ if (Result != llvm::conversionOK ||
!isAllowedIDChar(static_cast<uint32_t>(CodePoint), LangOpts))
return false;
@@ -1533,14 +1538,22 @@ FinishIdentifier:
// preprocessor, which may macro expand it or something.
if (II->isHandleIdentifierCase())
return PP->HandleIdentifier(Result);
-
+
+ if (II->getTokenID() == tok::identifier && isCodeCompletionPoint(CurPtr)
+ && II->getPPKeywordID() == tok::pp_not_keyword
+ && II->getObjCKeywordID() == tok::objc_not_keyword) {
+ // Return the code-completion token.
+ Result.setKind(tok::code_completion);
+ cutOffLexing();
+ return true;
+ }
return true;
}
// Otherwise, $,\,? in identifier found. Enter slower path.
C = getCharAndSize(CurPtr, Size);
- while (1) {
+ while (true) {
if (C == '$') {
// If we hit a $ and they are not supported in identifiers, we are done.
if (!LangOpts.DollarIdents) goto FinishIdentifier;
@@ -1700,9 +1713,9 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
getLangOpts());
if (!isIdentifierBody(Next)) {
// End of suffix. Check whether this is on the whitelist.
- IsUDSuffix = (Chars == 1 && Buffer[0] == 's') ||
- NumericLiteralParser::isValidUDSuffix(
- getLangOpts(), StringRef(Buffer, Chars));
+ const StringRef CompleteSuffix(Buffer, Chars);
+ IsUDSuffix = StringLiteralParser::isValidUDSuffix(getLangOpts(),
+ CompleteSuffix);
break;
}
@@ -1829,7 +1842,7 @@ bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
// Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
// it's possible the '"' was intended to be part of the raw string, but
// there's not much we can do about that.
- while (1) {
+ while (true) {
char C = *CurPtr++;
if (C == '"')
@@ -1848,7 +1861,7 @@ bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
const char *Prefix = CurPtr;
CurPtr += PrefixLen + 1; // skip over prefix and '('
- while (1) {
+ while (true) {
char C = *CurPtr++;
if (C == ')') {
@@ -1913,7 +1926,6 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
return true;
}
-
/// LexCharConstant - Lex the remainder of a character constant, after having
/// lexed either ' or L' or u8' or u' or U'.
bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
@@ -1992,7 +2004,7 @@ bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr,
unsigned char Char = *CurPtr;
// Skip consecutive spaces efficiently.
- while (1) {
+ while (true) {
// Skip horizontal whitespace very aggressively.
while (isHorizontalWhitespace(Char))
Char = *++CurPtr;
@@ -2315,7 +2327,7 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
if (C == '/')
C = *CurPtr++;
- while (1) {
+ while (true) {
// Skip over all non-interesting characters until we find end of buffer or a
// (probably ending) '/' character.
if (CurPtr + 24 < BufferEnd &&
@@ -2456,7 +2468,7 @@ void Lexer::ReadToEndOfLine(SmallVectorImpl<char> *Result) {
// CurPtr - Cache BufferPtr in an automatic variable.
const char *CurPtr = BufferPtr;
- while (1) {
+ while (true) {
char Char = getAndAdvanceChar(CurPtr, Tmp);
switch (Char) {
default:
@@ -2669,7 +2681,6 @@ bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
return false;
}
-
/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
/// is the end of a conflict marker. Handle it by ignoring up until the end of
@@ -3498,7 +3509,6 @@ LexNextToken:
CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
Kind = tok::greatergreater;
}
-
} else {
Kind = tok::greater;
}
@@ -3615,17 +3625,17 @@ LexNextToken:
break;
}
- UTF32 CodePoint;
+ llvm::UTF32 CodePoint;
// We can't just reset CurPtr to BufferPtr because BufferPtr may point to
// an escaped newline.
--CurPtr;
- ConversionResult Status =
- llvm::convertUTF8Sequence((const UTF8 **)&CurPtr,
- (const UTF8 *)BufferEnd,
+ llvm::ConversionResult Status =
+ llvm::convertUTF8Sequence((const llvm::UTF8 **)&CurPtr,
+ (const llvm::UTF8 *)BufferEnd,
&CodePoint,
- strictConversion);
- if (Status == conversionOK) {
+ llvm::strictConversion);
+ if (Status == llvm::conversionOK) {
if (CheckUnicodeWhitespace(Result, CodePoint, CurPtr)) {
if (SkipWhitespace(Result, CurPtr, TokAtPhysicalStartOfLine))
return true; // KeepWhitespaceMode
diff --git a/lib/Lex/LiteralSupport.cpp b/lib/Lex/LiteralSupport.cpp
index e68b82fb499a..fbfd3fe5cce0 100644
--- a/lib/Lex/LiteralSupport.cpp
+++ b/lib/Lex/LiteralSupport.cpp
@@ -14,12 +14,25 @@
#include "clang/Lex/LiteralSupport.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <string>
using namespace clang;
@@ -134,7 +147,7 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
if (Diags)
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
diag::err_hex_escape_no_digits) << "x";
- HadError = 1;
+ HadError = true;
break;
}
@@ -389,7 +402,7 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
// using reinterpret_cast.
- UTF32 *ResultPtr = reinterpret_cast<UTF32*>(ResultBuf);
+ llvm::UTF32 *ResultPtr = reinterpret_cast<llvm::UTF32*>(ResultBuf);
*ResultPtr = UcnVal;
ResultBuf += 4;
return;
@@ -398,7 +411,7 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
if (CharByteWidth == 2) {
// FIXME: Make the type of the result buffer correct instead of
// using reinterpret_cast.
- UTF16 *ResultPtr = reinterpret_cast<UTF16*>(ResultBuf);
+ llvm::UTF16 *ResultPtr = reinterpret_cast<llvm::UTF16*>(ResultBuf);
if (UcnVal <= (UTF32)0xFFFF) {
*ResultPtr = UcnVal;
@@ -452,7 +465,6 @@ static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
ResultBuf += bytesToWrite;
}
-
/// integer-constant: [C99 6.4.4.1]
/// decimal-constant integer-suffix
/// octal-constant integer-suffix
@@ -985,7 +997,6 @@ NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
return Result.convertFromString(Str, APFloat::rmNearestTiesToEven);
}
-
/// \verbatim
/// user-defined-character-literal: [C++11 lex.ext]
/// character-literal ud-suffix
@@ -1103,11 +1114,11 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
char const *tmp_in_start = start;
uint32_t *tmp_out_start = buffer_begin;
- ConversionResult res =
- ConvertUTF8toUTF32(reinterpret_cast<UTF8 const **>(&start),
- reinterpret_cast<UTF8 const *>(begin),
- &buffer_begin, buffer_end, strictConversion);
- if (res != conversionOK) {
+ llvm::ConversionResult res =
+ llvm::ConvertUTF8toUTF32(reinterpret_cast<llvm::UTF8 const **>(&start),
+ reinterpret_cast<llvm::UTF8 const *>(begin),
+ &buffer_begin, buffer_end, llvm::strictConversion);
+ if (res != llvm::conversionOK) {
// If we see bad encoding for unprefixed character literals, warn and
// simply copy the byte values, for compatibility with gcc and
// older versions of clang.
@@ -1499,13 +1510,13 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
// using reinterpret_cast.
- UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultPtr);
+ llvm::UTF32 *ResultWidePtr = reinterpret_cast<llvm::UTF32*>(ResultPtr);
*ResultWidePtr = ResultChar;
ResultPtr += 4;
} else if (CharByteWidth == 2) {
// FIXME: Make the type of the result buffer correct instead of
// using reinterpret_cast.
- UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultPtr);
+ llvm::UTF16 *ResultWidePtr = reinterpret_cast<llvm::UTF16*>(ResultPtr);
*ResultWidePtr = ResultChar & 0xFFFF;
ResultPtr += 2;
} else {
@@ -1520,12 +1531,12 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
if (CharByteWidth == 4) {
// FIXME: Make the type of the result buffer correct instead of
// using reinterpret_cast.
- UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultBuf.data());
+ llvm::UTF32 *ResultWidePtr = reinterpret_cast<llvm::UTF32*>(ResultBuf.data());
ResultWidePtr[0] = GetNumStringChars() - 1;
} else if (CharByteWidth == 2) {
// FIXME: Make the type of the result buffer correct instead of
// using reinterpret_cast.
- UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultBuf.data());
+ llvm::UTF16 *ResultWidePtr = reinterpret_cast<llvm::UTF16*>(ResultBuf.data());
ResultWidePtr[0] = GetNumStringChars() - 1;
} else {
assert(CharByteWidth == 1 && "Unexpected char width");
@@ -1559,7 +1570,7 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
static const char *resyncUTF8(const char *Err, const char *End) {
if (Err == End)
return End;
- End = Err + std::min<unsigned>(getNumBytesForUTF8(*Err), End-Err);
+ End = Err + std::min<unsigned>(llvm::getNumBytesForUTF8(*Err), End-Err);
while (++Err != End && (*Err & 0xC0) == 0x80)
;
return Err;
@@ -1571,7 +1582,7 @@ static const char *resyncUTF8(const char *Err, const char *End) {
bool StringLiteralParser::CopyStringFragment(const Token &Tok,
const char *TokBegin,
StringRef Fragment) {
- const UTF8 *ErrorPtrTmp;
+ const llvm::UTF8 *ErrorPtrTmp;
if (ConvertUTF8toWide(CharByteWidth, Fragment, ResultPtr, ErrorPtrTmp))
return false;
@@ -1697,3 +1708,12 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
return SpellingPtr-SpellingStart;
}
+
+/// Determine whether a suffix is a valid ud-suffix. We avoid treating reserved
+/// suffixes as ud-suffixes, because the diagnostic experience is better if we
+/// treat them as invalid suffixes.
+bool StringLiteralParser::isValidUDSuffix(const LangOptions &LangOpts,
+ StringRef Suffix) {
+ return NumericLiteralParser::isValidUDSuffix(LangOpts, Suffix) ||
+ Suffix == "sv";
+}
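The whitelist gains "sv" because C++17 reserves it for std::string_view literals, and rejecting it as a ud-suffix would break such code now that the Lexer consults this routine (see the LexUDSuffix change above). A minimal usage sketch, assuming C++17:

    #include <string_view>
    using namespace std::literals;  // provides operator""sv

    auto Name = "clang"sv;          // std::string_view, not const char *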
diff --git a/lib/Lex/MacroInfo.cpp b/lib/Lex/MacroInfo.cpp
index 2ef4387b99ba..924613dcb840 100644
--- a/lib/Lex/MacroInfo.cpp
+++ b/lib/Lex/MacroInfo.cpp
@@ -240,6 +240,6 @@ ModuleMacro *ModuleMacro::create(Preprocessor &PP, Module *OwningModule,
ArrayRef<ModuleMacro *> Overrides) {
void *Mem = PP.getPreprocessorAllocator().Allocate(
sizeof(ModuleMacro) + sizeof(ModuleMacro *) * Overrides.size(),
- llvm::alignOf<ModuleMacro>());
+ alignof(ModuleMacro));
return new (Mem) ModuleMacro(OwningModule, II, Macro, Overrides);
}
diff --git a/lib/Lex/ModuleMap.cpp b/lib/Lex/ModuleMap.cpp
index 3e3215dee82a..9d0f2eb2fa79 100644
--- a/lib/Lex/ModuleMap.cpp
+++ b/lib/Lex/ModuleMap.cpp
@@ -297,11 +297,14 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
if (LangOpts.ModulesStrictDeclUse) {
Diags.Report(FilenameLoc, diag::err_undeclared_use_of_module)
<< RequestingModule->getFullModuleName() << Filename;
- } else if (RequestingModule && RequestingModuleIsModuleInterface) {
+ } else if (RequestingModule && RequestingModuleIsModuleInterface &&
+ LangOpts.isCompilingModule()) {
+ // Do not diagnose when we are not compiling a module.
diag::kind DiagID = RequestingModule->getTopLevelModule()->IsFramework ?
diag::warn_non_modular_include_in_framework_module :
diag::warn_non_modular_include_in_module;
- Diags.Report(FilenameLoc, DiagID) << RequestingModule->getFullModuleName();
+ Diags.Report(FilenameLoc, DiagID) << RequestingModule->getFullModuleName()
+ << File->getName();
}
}
@@ -325,9 +328,10 @@ static bool isBetterKnownHeader(const ModuleMap::KnownHeader &New,
return false;
}
-ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File) {
+ModuleMap::KnownHeader ModuleMap::findModuleForHeader(const FileEntry *File,
+ bool AllowTextual) {
auto MakeResult = [&](ModuleMap::KnownHeader R) -> ModuleMap::KnownHeader {
- if (R.getRole() & ModuleMap::TextualHeader)
+ if (!AllowTextual && R.getRole() & ModuleMap::TextualHeader)
return ModuleMap::KnownHeader();
return R;
};
@@ -558,6 +562,25 @@ ModuleMap::findOrCreateModule(StringRef Name, Module *Parent, bool IsFramework,
return std::make_pair(Result, true);
}
+Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
+ StringRef Name) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ assert(!Modules[Name] && "redefining existing module");
+
+ auto *Result =
+ new Module(Name, Loc, nullptr, /*IsFramework*/ false,
+ /*IsExplicit*/ false, NumCreatedModules++);
+ Modules[Name] = SourceModule = Result;
+
+ // Mark the main source file as being within the newly-created module so that
+ // declarations and macros are properly visibility-restricted to it.
+ auto *MainFile = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
+ assert(MainFile && "no input file for module interface");
+ Headers[MainFile].push_back(KnownHeader(Result, PrivateHeader));
+
+ return Result;
+}
+
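createModuleForInterfaceUnit backs module-interface compilation, where the main source file itself defines the module instead of a module map, and its contents become a private header of that module. A minimal interface unit for context (shown with C++20 "export module" syntax; the -fmodules-ts spelling current when this code landed differed slightly):

    // M.cppm -- everything in this file is visibility-restricted to
    // importers of module 'M'.
    export module M;
    export int answer() { return 42; }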
/// \brief For a framework module, infer the framework against which we
/// should link.
static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
@@ -573,8 +596,7 @@ static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
// The library name of a framework has more than one possible extension since
// the introduction of the text-based dynamic library format. We need to check
// for both before we give up.
- static const char *frameworkExtensions[] = {"", ".tbd"};
- for (const auto *extension : frameworkExtensions) {
+ for (const char *extension : {"", ".tbd"}) {
llvm::sys::path::replace_extension(LibName, extension);
if (FileMgr.getFile(LibName)) {
Mod->LinkLibraries.push_back(Module::LinkLibrary(Mod->Name,
@@ -653,6 +675,8 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
Attrs.IsSystem |= inferred->second.Attrs.IsSystem;
Attrs.IsExternC |= inferred->second.Attrs.IsExternC;
Attrs.IsExhaustive |= inferred->second.Attrs.IsExhaustive;
+ Attrs.NoUndeclaredIncludes |=
+ inferred->second.Attrs.NoUndeclaredIncludes;
ModuleMapFile = inferred->second.ModuleMapFile;
}
}
@@ -690,6 +714,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
Result->IsSystem |= Attrs.IsSystem;
Result->IsExternC |= Attrs.IsExternC;
Result->ConfigMacrosExhaustive |= Attrs.IsExhaustive;
+ Result->NoUndeclaredIncludes |= Attrs.NoUndeclaredIncludes;
Result->Directory = FrameworkDir;
// umbrella header "umbrella-header-name"
@@ -802,10 +827,10 @@ void ModuleMap::addHeader(Module *Mod, Module::Header Header,
return;
HeaderList.push_back(KH);
- Mod->Headers[headerRoleToKind(Role)].push_back(std::move(Header));
+ Mod->Headers[headerRoleToKind(Role)].push_back(Header);
bool isCompilingModuleHeader =
- LangOpts.CompilingModule && Mod->getTopLevelModule() == SourceModule;
+ LangOpts.isCompilingModule() && Mod->getTopLevelModule() == SourceModule;
if (!Imported || isCompilingModuleHeader) {
// When we import HeaderFileInfo, the external source is expected to
// set the isModuleHeader flag itself.
@@ -1288,7 +1313,9 @@ namespace {
/// \brief The 'extern_c' attribute.
AT_extern_c,
/// \brief The 'exhaustive' attribute.
- AT_exhaustive
+ AT_exhaustive,
+ /// \brief The 'no_undeclared_includes' attribute.
+ AT_no_undeclared_includes
};
}
@@ -1458,8 +1485,47 @@ void ModuleMapParser::parseModuleDecl() {
ActiveModule->IsSystem = true;
if (Attrs.IsExternC)
ActiveModule->IsExternC = true;
+ if (Attrs.NoUndeclaredIncludes ||
+ (!ActiveModule->Parent && ModuleName == "Darwin"))
+ ActiveModule->NoUndeclaredIncludes = true;
ActiveModule->Directory = Directory;
+ if (!ActiveModule->Parent) {
+ StringRef MapFileName(ModuleMapFile->getName());
+ if (MapFileName.endswith("module.private.modulemap") ||
+ MapFileName.endswith("module_private.map")) {
+ // Adding a top-level module from a private modulemap is likely a
+ // user error; we check to see if there's another top-level module
+ // defined in the non-private map in the same dir, and if so emit a
+ // warning.
+ for (auto E = Map.module_begin(); E != Map.module_end(); ++E) {
+ auto const *M = E->getValue();
+ if (!M->Parent &&
+ M->Directory == ActiveModule->Directory &&
+ M->Name != ActiveModule->Name) {
+ Diags.Report(ActiveModule->DefinitionLoc,
+ diag::warn_mmap_mismatched_top_level_private)
+ << ActiveModule->Name << M->Name;
+ // The pattern we're defending against here is typically due to
+ // a module named FooPrivate which is supposed to be a submodule
+ // called Foo.Private. Emit a fixit in that case.
+ auto D =
+ Diags.Report(ActiveModule->DefinitionLoc,
+ diag::note_mmap_rename_top_level_private_as_submodule);
+ D << ActiveModule->Name << M->Name;
+ StringRef Bad(ActiveModule->Name);
+ if (Bad.consume_back("Private")) {
+ SmallString<128> Fixed = Bad;
+ Fixed.append(".Private");
+ D << FixItHint::CreateReplacement(ActiveModule->DefinitionLoc,
+ Fixed);
+ }
+ break;
+ }
+ }
+ }
+ }
+
bool Done = false;
do {
switch (Tok.Kind) {
@@ -1624,15 +1690,12 @@ void ModuleMapParser::parseExternModuleDecl() {
/// was never correct and causes issues now that we check it, so drop it.
static bool shouldAddRequirement(Module *M, StringRef Feature,
bool &IsRequiresExcludedHack) {
- static const StringRef DarwinCExcluded[] = {"Darwin", "C", "excluded"};
- static const StringRef TclPrivate[] = {"Tcl", "Private"};
- static const StringRef IOKitAVC[] = {"IOKit", "avc"};
-
- if (Feature == "excluded" && (M->fullModuleNameIs(DarwinCExcluded) ||
- M->fullModuleNameIs(TclPrivate))) {
+ if (Feature == "excluded" &&
+ (M->fullModuleNameIs({"Darwin", "C", "excluded"}) ||
+ M->fullModuleNameIs({"Tcl", "Private"}))) {
IsRequiresExcludedHack = true;
return false;
- } else if (Feature == "cplusplus" && M->fullModuleNameIs(IOKitAVC)) {
+ } else if (Feature == "cplusplus" && M->fullModuleNameIs({"IOKit", "avc"})) {
return false;
}
@@ -1824,13 +1887,7 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
// If Clang supplies this header but the underlying system does not,
// just silently swap in our builtin version. Otherwise, we'll end
// up adding both (later).
- //
- // For local visibility, entirely replace the system file with our
- // one and textually include the system one. We need to pass macros
- // from our header to the system one if we #include_next it.
- //
- // FIXME: Can we do this in all cases?
- if (BuiltinFile && (!File || Map.LangOpts.ModulesLocalVisibility)) {
+ if (BuiltinFile && !File) {
File = BuiltinFile;
RelativePathName = BuiltinPathName;
BuiltinFile = nullptr;
@@ -1856,15 +1913,20 @@ void ModuleMapParser::parseHeaderDecl(MMToken::TokenKind LeadingToken,
Module::Header H = {RelativePathName.str(), File};
Map.excludeHeader(ActiveModule, H);
} else {
- // If there is a builtin counterpart to this file, add it now, before
- // the "real" header, so we build the built-in one first when building
- // the module.
+ // If there is a builtin counterpart to this file, add it now so it can
+ // wrap the system header.
if (BuiltinFile) {
// FIXME: Taking the name from the FileEntry is unstable and can give
// different results depending on how we've previously named that file
// in this build.
Module::Header H = { BuiltinFile->getName(), BuiltinFile };
Map.addHeader(ActiveModule, H, Role);
+
+ // If we have both a builtin and system version of the file, the
+ // builtin version may want to inject macros into the system header, so
+ // force the system header to be treated as a textual header in this
+ // case.
+ Role = ModuleMap::ModuleHeaderRole(Role | ModuleMap::TextualHeader);
}
// Record this header.
@@ -2354,6 +2416,7 @@ bool ModuleMapParser::parseOptionalAttributes(Attributes &Attrs) {
= llvm::StringSwitch<AttributeKind>(Tok.getString())
.Case("exhaustive", AT_exhaustive)
.Case("extern_c", AT_extern_c)
+ .Case("no_undeclared_includes", AT_no_undeclared_includes)
.Case("system", AT_system)
.Default(AT_unknown);
switch (Attribute) {
@@ -2373,6 +2436,10 @@ bool ModuleMapParser::parseOptionalAttributes(Attributes &Attrs) {
case AT_exhaustive:
Attrs.IsExhaustive = true;
break;
+
+ case AT_no_undeclared_includes:
+ Attrs.NoUndeclaredIncludes = true;
+ break;
}
consumeToken();
diff --git a/lib/Lex/PPCaching.cpp b/lib/Lex/PPCaching.cpp
index 4742aae5c123..45bdce32062a 100644
--- a/lib/Lex/PPCaching.cpp
+++ b/lib/Lex/PPCaching.cpp
@@ -86,7 +86,7 @@ void Preprocessor::EnterCachingLexMode() {
const Token &Preprocessor::PeekAhead(unsigned N) {
assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
ExitCachingLexMode();
- for (unsigned C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
+ for (size_t C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
CachedTokens.push_back(Token());
Lex(CachedTokens.back());
}
@@ -105,7 +105,7 @@ void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
if (AnnotBegin->getLocation() == Tok.getLocation()) {
- assert((BacktrackPositions.empty() || BacktrackPositions.back() < i) &&
+ assert((BacktrackPositions.empty() || BacktrackPositions.back() <= i) &&
"The backtrack pos points inside the annotated tokens!");
// Replace the cached tokens with the single annotation token.
if (i < CachedLexPos)
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 77f118fd3ccb..85504de3d15d 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -12,25 +12,41 @@
///
//===----------------------------------------------------------------------===//
-#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/CharInfo.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/HeaderSearch.h"
-#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/ModuleMap.h"
+#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Pragma.h"
-#include "llvm/ADT/APInt.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/iterator_range.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/AlignOf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/SaveAndRestore.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <new>
+#include <string>
+#include <utility>
using namespace clang;
@@ -53,7 +69,7 @@ MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
MacroInfo *Preprocessor::AllocateDeserializedMacroInfo(SourceLocation L,
unsigned SubModuleID) {
- static_assert(llvm::AlignOf<MacroInfo>::Alignment >= sizeof(SubModuleID),
+ static_assert(alignof(MacroInfo) >= sizeof(SubModuleID),
"alignment for MacroInfo is less than the ID");
DeserializedMacroInfoChain *MIChain =
BP.Allocate<DeserializedMacroInfoChain>();
@@ -268,7 +284,7 @@ bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
if (ShadowFlag)
*ShadowFlag = false;
if (!SourceMgr.isInSystemHeader(MacroNameLoc) &&
- (strcmp(SourceMgr.getBufferName(MacroNameLoc), "<built-in>") != 0)) {
+ (SourceMgr.getBufferName(MacroNameLoc) != "<built-in>")) {
MacroDiag D = MD_NoWarn;
if (isDefineUndef == MU_Define) {
D = shouldWarnOnMacroDef(*this, II);
@@ -382,7 +398,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
// disabling warnings, etc.
CurPPLexer->LexingRawMode = true;
Token Tok;
- while (1) {
+ while (true) {
CurLexer->Lex(Tok);
if (Tok.is(tok::code_completion)) {
@@ -455,7 +471,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
Directive = RI;
} else {
std::string DirectiveStr = getSpelling(Tok);
- unsigned IdLen = DirectiveStr.size();
+ size_t IdLen = DirectiveStr.size();
if (IdLen >= 20) {
CurPPLexer->ParsingPreprocessorDirective = false;
// Restore comment saving mode.
@@ -578,7 +594,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
}
void Preprocessor::PTHSkipExcludedConditionalBlock() {
- while (1) {
+ while (true) {
assert(CurPTHLexer);
assert(CurPTHLexer->LexingRawMode == false);
@@ -785,8 +801,7 @@ const FileEntry *Preprocessor::LookupFile(
// headers included by quoted include directives.
// See: http://msdn.microsoft.com/en-us/library/36k2cdd4.aspx
if (LangOpts.MSVCCompat && !isAngled) {
- for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
- IncludeStackInfo &ISEntry = IncludeMacroStack[e - i - 1];
+ for (IncludeStackInfo &ISEntry : llvm::reverse(IncludeMacroStack)) {
if (IsFileLexer(ISEntry))
if ((FileEnt = ISEntry.ThePPLexer->getFileEntry()))
Includers.push_back(std::make_pair(FileEnt, FileEnt->getDir()));
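The index-from-the-end loops over IncludeMacroStack become range-for over llvm::reverse here and in several later hunks. The same rewrite on a plain vector, as a minimal sketch:

#include "llvm/ADT/STLExtras.h" // for llvm::reverse
#include <vector>

int newestNonZero(std::vector<int> &Stack) {
  // Replaces: for (unsigned i = 0, e = Stack.size(); i != e; ++i)
  //             { int &Entry = Stack[e - i - 1]; ... }
  for (int &Entry : llvm::reverse(Stack))
    if (Entry != 0)
      return Entry;
  return 0;
}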
@@ -849,8 +864,7 @@ const FileEntry *Preprocessor::LookupFile(
}
}
- for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
- IncludeStackInfo &ISEntry = IncludeMacroStack[e-i-1];
+ for (IncludeStackInfo &ISEntry : llvm::reverse(IncludeMacroStack)) {
if (IsFileLexer(ISEntry)) {
if ((CurFileEnt = ISEntry.ThePPLexer->getFileEntry())) {
if ((FE = HeaderInfo.LookupSubframeworkHeader(
@@ -998,11 +1012,11 @@ void Preprocessor::HandleDirective(Token &Result) {
case tok::pp_define:
return HandleDefineDirective(Result, ImmediatelyAfterTopLevelIfndef);
case tok::pp_undef:
- return HandleUndefDirective(Result);
+ return HandleUndefDirective();
// C99 6.10.4 - Line Control.
case tok::pp_line:
- return HandleLineDirective(Result);
+ return HandleLineDirective();
// C99 6.10.5 - Error Directive.
case tok::pp_error:
@@ -1039,7 +1053,7 @@ void Preprocessor::HandleDirective(Token &Result) {
case tok::pp___private_macro:
if (getLangOpts().Modules)
- return HandleMacroPrivateDirective(Result);
+ return HandleMacroPrivateDirective();
break;
}
break;
@@ -1137,7 +1151,7 @@ static bool GetLineValue(Token &DigitTok, unsigned &Val,
/// # line digit-sequence
/// # line digit-sequence "s-char-sequence"
/// \endverbatim
-void Preprocessor::HandleLineDirective(Token &Tok) {
+void Preprocessor::HandleLineDirective() {
// Read the line # and string argument. Per C99 6.10.4p5, these tokens are
// expanded.
Token DigitTok;
@@ -1442,7 +1456,7 @@ void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
}
/// \brief Handle a #private directive.
-void Preprocessor::HandleMacroPrivateDirective(Token &Tok) {
+void Preprocessor::HandleMacroPrivateDirective() {
Token MacroNameTok;
ReadMacroName(MacroNameTok, MU_Undef);
@@ -1551,7 +1565,7 @@ bool Preprocessor::ConcatenateIncludeName(SmallString<128> &FilenameBuffer,
FilenameBuffer.push_back(' ');
// Get the spelling of the token, directly into FilenameBuffer if possible.
- unsigned PreAppendSize = FilenameBuffer.size();
+ size_t PreAppendSize = FilenameBuffer.size();
FilenameBuffer.resize(PreAppendSize+CurTok.getLength());
const char *BufPtr = &FilenameBuffer[PreAppendSize];
@@ -1602,7 +1616,7 @@ static void diagnoseAutoModuleImport(
assert(PP.getLangOpts().ObjC2 && "no import syntax available");
SmallString<128> PathString;
- for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ for (size_t I = 0, N = Path.size(); I != N; ++I) {
if (I)
PathString += '.';
PathString += Path[I].first->getName();
@@ -1831,7 +1845,8 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// If the file is still not found, just go with the vanilla diagnostic
if (!File)
- Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename
+ << FilenameRange;
}
}
@@ -1851,12 +1866,9 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
// unavailable, diagnose the situation and bail out.
// FIXME: Remove this; loadModule does the same check (but produces
// slightly worse diagnostics).
- if (!SuggestedModule.getModule()->isAvailable() &&
- !SuggestedModule.getModule()
- ->getTopLevelModule()
- ->HasIncompatibleModuleFile) {
- clang::Module::Requirement Requirement;
- clang::Module::UnresolvedHeaderDirective MissingHeader;
+ if (!SuggestedModule.getModule()->isAvailable()) {
+ Module::Requirement Requirement;
+ Module::UnresolvedHeaderDirective MissingHeader;
Module *M = SuggestedModule.getModule();
// Identify the cause.
(void)M->isAvailable(getLangOpts(), getTargetInfo(), Requirement,
@@ -1903,9 +1915,12 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
else if (Imported.isMissingExpected()) {
// We failed to find a submodule that we assumed would exist (because it
// was in the directory of an umbrella header, for instance), but no
- // actual module exists for it (because the umbrella header is
+ // actual module containing it exists (because the umbrella header is
// incomplete). Treat this as a textual inclusion.
SuggestedModule = ModuleMap::KnownHeader();
+ } else if (Imported.isConfigMismatch()) {
+ // On a configuration mismatch, enter the header textually. We still know
+ // that it's part of the corresponding module.
} else {
// We hit an error processing the import. Bail out.
if (hadModuleLoaderFatalFailure()) {
@@ -2043,7 +2058,11 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
// diagnostic.
const DirectoryLookup *Lookup = CurDirLookup;
const FileEntry *LookupFromFile = nullptr;
- if (isInPrimaryFile()) {
+ if (isInPrimaryFile() && LangOpts.IsHeaderFile) {
+ // If the main file is a header, then it's either for PCH/AST generation,
+ // or libclang opened it. Either way, handle it as a normal include below
+ // and do not complain about include_next.
+ } else if (isInPrimaryFile()) {
Lookup = nullptr;
Diag(IncludeNextTok, diag::pp_include_next_in_primary);
} else if (CurSubmodule) {
@@ -2098,7 +2117,7 @@ void Preprocessor::HandleIncludeMacrosDirective(SourceLocation HashLoc,
// This directive should only occur in the predefines buffer. If not, emit an
// error and reject it.
SourceLocation Loc = IncludeMacrosTok.getLocation();
- if (strcmp(SourceMgr.getBufferName(Loc), "<built-in>") != 0) {
+ if (SourceMgr.getBufferName(Loc) != "<built-in>") {
Diag(IncludeMacrosTok.getLocation(),
diag::pp_include_macros_out_of_predefines);
DiscardUntilEndOfDirective();
@@ -2127,7 +2146,7 @@ void Preprocessor::HandleIncludeMacrosDirective(SourceLocation HashLoc,
bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
SmallVector<IdentifierInfo*, 32> Arguments;
- while (1) {
+ while (true) {
LexUnexpandedToken(Tok);
switch (Tok.getKind()) {
case tok::r_paren:
@@ -2536,7 +2555,7 @@ void Preprocessor::HandleDefineDirective(Token &DefineTok,
/// HandleUndefDirective - Implements \#undef.
///
-void Preprocessor::HandleUndefDirective(Token &UndefTok) {
+void Preprocessor::HandleUndefDirective() {
++NumUndefined;
Token MacroNameTok;
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 94075ece35ca..862a4713e4bc 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -17,14 +17,24 @@
//===----------------------------------------------------------------------===//
#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Token.h"
#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
+#include <cassert>
+
using namespace clang;
namespace {
@@ -34,6 +44,7 @@ namespace {
class PPValue {
SourceRange Range;
IdentifierInfo *II;
+
public:
llvm::APSInt Val;
@@ -58,7 +69,7 @@ public:
void setEnd(SourceLocation L) { Range.setEnd(L); }
};
-}
+} // end anonymous namespace
static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
Token &PeekTok, bool ValueLive,
@@ -469,8 +480,6 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
}
}
-
-
/// getPrecedence - Return the precedence of the specified binary operator
/// token. This returns:
/// ~0 - Invalid token.
@@ -531,7 +540,7 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
return true;
}
- while (1) {
+ while (true) {
// If this token has a lower precedence than we are allowed to parse, return
// it so that higher levels of the recursion can parse it.
if (PeekPrec < MinPrec)
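EvaluateDirectiveSubExpr is a precedence-climbing loop: it keeps folding binary operators that bind at least as tightly as MinPrec, and right operands that bind tighter recurse with a raised minimum. A self-contained sketch of the algorithm over integer arithmetic, with hypothetical helpers rather than the Preprocessor's API:

static const char *P; // cursor into a NUL-terminated expression

static int precedence(char Op) {
  switch (Op) {
  case '*': case '/': return 2;
  case '+': case '-': return 1;
  default:            return 0; // not a binary operator
  }
}

static long parsePrimary() {
  long V = 0;
  while (*P >= '0' && *P <= '9')
    V = V * 10 + (*P++ - '0');
  return V;
}

// The EvaluateDirectiveSubExpr shape: consume operators while their
// precedence is >= MinPrec; tighter-binding right-hand sides recurse.
static long parseExpr(long LHS, int MinPrec) {
  while (precedence(*P) >= MinPrec && precedence(*P) > 0) {
    char Op = *P++;
    long RHS = parsePrimary();
    while (precedence(*P) > precedence(Op))
      RHS = parseExpr(RHS, precedence(Op) + 1);
    switch (Op) {
    case '+': LHS += RHS; break;
    case '-': LHS -= RHS; break;
    case '*': LHS *= RHS; break;
    case '/': LHS /= RHS; break;
    }
  }
  return LHS;
}

long evaluatePPExpr(const char *Expr) { // evaluatePPExpr("1+2*3") == 7
  P = Expr;
  return parseExpr(parsePrimary(), 1);
}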
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index e2eceafd983b..4db17c344b67 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -40,10 +40,10 @@ bool Preprocessor::isInPrimaryFile() const {
// If there are any stacked lexers, we're in a #include.
assert(IsFileLexer(IncludeMacroStack[0]) &&
"Top level include stack isn't our primary lexer?");
- for (unsigned i = 1, e = IncludeMacroStack.size(); i != e; ++i)
- if (IsFileLexer(IncludeMacroStack[i]))
- return false;
- return true;
+ return std::none_of(IncludeMacroStack.begin() + 1, IncludeMacroStack.end(),
+ [this](const IncludeStackInfo &ISI) -> bool {
+ return IsFileLexer(ISI);
+ });
}
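The rewrite above expresses "no stacked file lexer exists past the first entry" with std::none_of instead of an early-return loop. The same idiom on a plain container, as a standalone sketch:

#include <algorithm>
#include <vector>

struct Entry { bool IsFileLexer; };

// The shape of the new isInPrimaryFile() check; assumes Stack is
// non-empty, as the assert above guarantees for IncludeMacroStack.
bool onlyFirstIsFile(const std::vector<Entry> &Stack) {
  return std::none_of(Stack.begin() + 1, Stack.end(),
                      [](const Entry &E) { return E.IsFileLexer; });
}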
/// getCurrentLexer - Return the current file lexer being lexed from. Note
@@ -54,8 +54,7 @@ PreprocessorLexer *Preprocessor::getCurrentFileLexer() const {
return CurPPLexer;
// Look for a stacked lexer.
- for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
- const IncludeStackInfo& ISI = IncludeMacroStack[i-1];
+ for (const IncludeStackInfo &ISI : llvm::reverse(IncludeMacroStack)) {
if (IsFileLexer(ISI))
return ISI.ThePPLexer;
}
@@ -566,8 +565,7 @@ void Preprocessor::HandleMicrosoftCommentPaste(Token &Tok) {
// explicit EOD token.
PreprocessorLexer *FoundLexer = nullptr;
bool LexerWasInPPMode = false;
- for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
- IncludeStackInfo &ISI = *(IncludeMacroStack.end()-i-1);
+ for (const IncludeStackInfo &ISI : llvm::reverse(IncludeMacroStack)) {
if (ISI.ThePPLexer == nullptr) continue; // Scan for a real lexer.
// Once we find a real lexer, mark it as raw mode (disabling macro
@@ -685,7 +683,7 @@ bool Preprocessor::needModuleMacros() const {
return true;
// Otherwise, we only need module macros if we're actually compiling a module
// interface.
- return getLangOpts().CompilingModule;
+ return getLangOpts().isCompilingModule();
}
void Preprocessor::LeaveSubmodule() {
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index 2ade6df9456a..aebebaac46ac 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -12,25 +12,49 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Lex/Preprocessor.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
-#include "llvm/ADT/STLExtras.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorLexer.h"
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
-#include <cstdio>
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstring>
#include <ctime>
+#include <string>
+#include <tuple>
+#include <utility>
+
using namespace clang;
MacroDirective *
@@ -68,12 +92,35 @@ void Preprocessor::appendMacroDirective(IdentifierInfo *II, MacroDirective *MD){
}
void Preprocessor::setLoadedMacroDirective(IdentifierInfo *II,
+ MacroDirective *ED,
MacroDirective *MD) {
+ // Normally, when a macro is defined, it goes through appendMacroDirective()
+ // above, which chains a macro to previous defines, undefs, etc.
+ // However, in a pch, the whole macro history up to the end of the pch is
+ // stored, so ASTReader goes through this function instead.
+ // However, built-in macros are already registered in the Preprocessor
+ // ctor, and ASTWriter stops writing the macro chain at built-in macros,
+ // so in that case the chain from the pch needs to be spliced to the existing
+ // built-in.
+
assert(II && MD);
MacroState &StoredMD = CurSubmoduleState->Macros[II];
- assert(!StoredMD.getLatest() &&
- "the macro history was modified before initializing it from a pch");
- StoredMD = MD;
+
+ if (auto *OldMD = StoredMD.getLatest()) {
+ // shouldIgnoreMacro() in ASTWriter also stops at macros from the
+ // predefines buffer in module builds. However, in module builds, modules
+ // are loaded completely before predefines are processed, so StoredMD
+ // will be nullptr for them when they're loaded. StoredMD should only be
+ // non-nullptr for builtins read from a pch file.
+ assert(OldMD->getMacroInfo()->isBuiltinMacro() &&
+ "only built-ins should have an entry here");
+ assert(!OldMD->getPrevious() && "builtin should only have a single entry");
+ ED->setPrevious(OldMD);
+ StoredMD.setLatest(MD);
+ } else {
+ StoredMD = MD;
+ }
+
// Setup the identifier as having associated macro history.
II->setHasMacroDefinition(true);
if (!MD->isDefined() && LeafModuleMacros.find(II) == LeafModuleMacros.end())
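The splice above is a singly-linked-list operation: the earliest directive of the chain read from the PCH gets the pre-existing built-in directive as its predecessor, and the newest loaded directive becomes the latest. A minimal sketch with a stripped-down directive type (hypothetical, not the real MacroDirective):

#include <cassert>

struct Directive {
  Directive *Previous = nullptr;
};

struct MacroState {
  Directive *Latest = nullptr;
};

// ED: earliest directive in the loaded chain; MD: latest in that chain.
// Mirrors the setLoadedMacroDirective splice above.
void spliceLoadedChain(MacroState &State, Directive *ED, Directive *MD) {
  if (Directive *OldLatest = State.Latest) {
    assert(!OldLatest->Previous && "builtin should be a single entry");
    ED->Previous = OldLatest; // chain the loaded history onto the builtin
  }
  State.Latest = MD;
}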
@@ -286,7 +333,6 @@ static IdentifierInfo *RegisterBuiltinMacro(Preprocessor &PP, const char *Name){
return Id;
}
-
/// RegisterBuiltinMacros - Register builtin macros, such as __LINE__ with the
/// identifier table.
void Preprocessor::RegisterBuiltinMacros() {
@@ -367,10 +413,8 @@ static bool isTrivialSingleTokenExpansion(const MacroInfo *MI,
// If this is a function-like macro invocation, it's safe to trivially expand
// as long as the identifier is not a macro argument.
return std::find(MI->arg_begin(), MI->arg_end(), II) == MI->arg_end();
-
}
-
/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
/// lexed is a '('. If so, consume the token and return true, if not, this
/// method should have no observable side-effect on the lexed tokens.
@@ -390,8 +434,7 @@ bool Preprocessor::isNextPPTokenLParen() {
// macro stack.
if (CurPPLexer)
return false;
- for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
- IncludeStackInfo &Entry = IncludeMacroStack[i-1];
+ for (const IncludeStackInfo &Entry : llvm::reverse(IncludeMacroStack)) {
if (Entry.TheLexer)
Val = Entry.TheLexer->isNextPPTokenLParen();
else if (Entry.ThePTHLexer)
@@ -480,8 +523,7 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
} else {
Callbacks->MacroExpands(Identifier, M, ExpansionRange, Args);
if (!DelayedMacroExpandsCallbacks.empty()) {
- for (unsigned i=0, e = DelayedMacroExpandsCallbacks.size(); i!=e; ++i) {
- MacroExpandsInfo &Info = DelayedMacroExpandsCallbacks[i];
+ for (const MacroExpandsInfo &Info : DelayedMacroExpandsCallbacks) {
// FIXME: We lose macro args info with delayed callback.
Callbacks->MacroExpands(Info.Tok, Info.MD, Info.Range,
/*Args=*/nullptr);
@@ -735,14 +777,14 @@ MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
assert(Tok.isOneOf(tok::l_paren, tok::comma) &&
"only expect argument separators here");
- unsigned ArgTokenStart = ArgTokens.size();
+ size_t ArgTokenStart = ArgTokens.size();
SourceLocation ArgStartLoc = Tok.getLocation();
// C99 6.10.3p11: Keep track of the number of l_parens we have seen. Note
// that we already consumed the first one.
unsigned NumParens = 0;
- while (1) {
+ while (true) {
// Read arguments as unexpanded tokens. This avoids issues, e.g., where
// an argument value in a macro could expand to ',' or '(' or ')'.
LexUnexpandedToken(Tok);
@@ -987,10 +1029,10 @@ Token *Preprocessor::cacheMacroExpandedTokens(TokenLexer *tokLexer,
if (cacheNeedsToGrow) {
// Go through all the TokenLexers whose 'Tokens' pointer points in the
// buffer and update the pointers to the (potential) new buffer array.
- for (unsigned i = 0, e = MacroExpandingLexersStack.size(); i != e; ++i) {
+ for (const auto &Lexer : MacroExpandingLexersStack) {
TokenLexer *prevLexer;
size_t tokIndex;
- std::tie(prevLexer, tokIndex) = MacroExpandingLexersStack[i];
+ std::tie(prevLexer, tokIndex) = Lexer;
prevLexer->Tokens = MacroExpandedTokens.data() + tokIndex;
}
}
@@ -1043,7 +1085,6 @@ static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
}
}
-
/// HasFeature - Return true if we recognize and implement the feature
/// specified by the identifier as a standard language feature.
static bool HasFeature(const Preprocessor &PP, StringRef Feature) {
@@ -1090,6 +1131,7 @@ static bool HasFeature(const Preprocessor &PP, StringRef Feature) {
.Case("cxx_rtti", LangOpts.RTTI && LangOpts.RTTIData)
.Case("enumerator_attributes", true)
.Case("nullability", true)
+ .Case("nullability_on_arrays", true)
.Case("memory_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Memory))
.Case("thread_sanitizer", LangOpts.Sanitize.has(SanitizerKind::Thread))
.Case("dataflow_sanitizer", LangOpts.Sanitize.has(SanitizerKind::DataFlow))
@@ -1171,7 +1213,7 @@ static bool HasFeature(const Preprocessor &PP, StringRef Feature) {
.Case("cxx_unrestricted_unions", LangOpts.CPlusPlus11)
.Case("cxx_user_literals", LangOpts.CPlusPlus11)
.Case("cxx_variadic_templates", LangOpts.CPlusPlus11)
- // C++1y features
+ // C++14 features
.Case("cxx_aggregate_nsdmi", LangOpts.CPlusPlus14)
.Case("cxx_binary_literals", LangOpts.CPlusPlus14)
.Case("cxx_contextual_conversions", LangOpts.CPlusPlus14)
@@ -1181,6 +1223,9 @@ static bool HasFeature(const Preprocessor &PP, StringRef Feature) {
.Case("cxx_relaxed_constexpr", LangOpts.CPlusPlus14)
.Case("cxx_return_type_deduction", LangOpts.CPlusPlus14)
.Case("cxx_variable_templates", LangOpts.CPlusPlus14)
+ // NOTE: For features covered by SD-6, it is preferable to provide *only*
+ // the SD-6 macro and not a __has_feature check.
+
// C++ TSes
//.Case("cxx_runtime_arrays", LangOpts.CPlusPlusTSArrays)
//.Case("cxx_concepts", LangOpts.CPlusPlusTSConcepts)
@@ -1264,7 +1309,7 @@ static bool HasExtension(const Preprocessor &PP, StringRef Extension) {
.Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus)
.Case("cxx_rvalue_references", LangOpts.CPlusPlus)
.Case("cxx_variadic_templates", LangOpts.CPlusPlus)
- // C++1y features supported by other languages as extensions.
+ // C++14 features supported by other languages as extensions.
.Case("cxx_binary_literals", true)
.Case("cxx_init_captures", LangOpts.CPlusPlus11)
.Case("cxx_variable_templates", LangOpts.CPlusPlus)
@@ -1400,7 +1445,11 @@ static bool EvaluateHasIncludeNext(Token &Tok,
// Preprocessor::HandleIncludeNextDirective.
const DirectoryLookup *Lookup = PP.GetCurDirLookup();
const FileEntry *LookupFromFile = nullptr;
- if (PP.isInPrimaryFile()) {
+ if (PP.isInPrimaryFile() && PP.getLangOpts().IsHeaderFile) {
+ // If the main file is a header, then it's either for PCH/AST generation,
+ // or libclang opened it. Either way, handle it as a normal include below
+ // and do not complain about __has_include_next.
+ } else if (PP.isInPrimaryFile()) {
Lookup = nullptr;
PP.Diag(Tok, diag::pp_include_next_in_primary);
} else if (PP.getCurrentSubmodule()) {
@@ -1796,7 +1845,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_expected_id_building_module);
- return getLangOpts().CompilingModule && II &&
+ return getLangOpts().isCompilingModule() && II &&
(II->getName() == getLangOpts().CurrentModule);
});
} else if (II == Ident__MODULE__) {
diff --git a/lib/Lex/PTHLexer.cpp b/lib/Lex/PTHLexer.cpp
index 5f63d35c5be7..ec806e844531 100644
--- a/lib/Lex/PTHLexer.cpp
+++ b/lib/Lex/PTHLexer.cpp
@@ -21,7 +21,6 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
@@ -318,7 +317,7 @@ public:
class PTHFileLookupCommonTrait {
public:
- typedef std::pair<unsigned char, const char*> internal_key_type;
+ typedef std::pair<unsigned char, StringRef> internal_key_type;
typedef unsigned hash_value_type;
typedef unsigned offset_type;
@@ -353,7 +352,7 @@ public:
}
static bool EqualKey(internal_key_type a, internal_key_type b) {
- return a.first == b.first && strcmp(a.second, b.second) == 0;
+ return a.first == b.first && a.second == b.second;
}
static PTHFileData ReadData(const internal_key_type& k,
@@ -629,15 +628,15 @@ PTHLexer *PTHManager::CreateLexer(FileID FID) {
namespace {
class PTHStatData {
public:
- const bool HasData;
uint64_t Size;
time_t ModTime;
llvm::sys::fs::UniqueID UniqueID;
+ const bool HasData;
bool IsDirectory;
PTHStatData(uint64_t Size, time_t ModTime, llvm::sys::fs::UniqueID UniqueID,
bool IsDirectory)
- : HasData(true), Size(Size), ModTime(ModTime), UniqueID(UniqueID),
+ : Size(Size), ModTime(ModTime), UniqueID(UniqueID), HasData(true),
IsDirectory(IsDirectory) {}
PTHStatData() : HasData(false) {}
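Moving HasData after the 8-byte members reads as a struct-packing fix: a leading bool ahead of a uint64_t buys seven bytes of padding. A standalone illustration, assuming a typical ABI with 8-byte-aligned uint64_t:

#include <cstdint>

struct BoolFirst {   // 1 + 7 pad + 8 + 8 + 1 + 7 pad = 32 bytes
  bool HasData;
  uint64_t Size;
  uint64_t ModTime;
  bool IsDirectory;
};

struct BoolsLast {   // 8 + 8 + 1 + 1 + 6 pad = 24 bytes
  uint64_t Size;
  uint64_t ModTime;
  bool HasData;
  bool IsDirectory;
};

static_assert(sizeof(BoolsLast) <= sizeof(BoolFirst),
              "trailing bools never pack worse");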
@@ -645,10 +644,10 @@ public:
class PTHStatLookupTrait : public PTHFileLookupCommonTrait {
public:
- typedef const char* external_key_type; // const char*
+ typedef StringRef external_key_type;

typedef PTHStatData data_type;
- static internal_key_type GetInternalKey(const char *path) {
+ static internal_key_type GetInternalKey(StringRef path) {
// The key 'kind' doesn't matter here because it is ignored in EqualKey.
return std::make_pair((unsigned char) 0x0, path);
}
@@ -656,7 +655,7 @@ public:
static bool EqualKey(internal_key_type a, internal_key_type b) {
// When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
// just the paths.
- return strcmp(a.second, b.second) == 0;
+ return a.second == b.second;
}
static data_type ReadData(const internal_key_type& k, const unsigned char* d,
@@ -695,7 +694,7 @@ public:
: Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
FL.getBase()) {}
- LookupResult getStat(const char *Path, FileData &Data, bool isFile,
+ LookupResult getStat(StringRef Path, FileData &Data, bool isFile,
std::unique_ptr<vfs::File> *F,
vfs::FileSystem &FS) override {
// Do the lookup for the file's data in the PTH file.
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 3bdd31b26ff8..100da514144a 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -14,21 +14,36 @@
#include "clang/Lex/Pragma.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
-#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PPCallbacks.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorLexer.h"
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Lex/Token.h"
+#include "clang/Lex/TokenLexer.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
-using namespace clang;
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <string>
+#include <vector>
-#include "llvm/Support/raw_ostream.h"
+using namespace clang;
// Out-of-line destructor to provide a home for the class.
PragmaHandler::~PragmaHandler() {
@@ -123,6 +138,7 @@ void Preprocessor::HandlePragmaDirective(SourceLocation IntroducerLoc,
}
namespace {
+
/// \brief Helper class for \see Preprocessor::Handle_Pragma.
class LexingFor_PragmaRAII {
Preprocessor &PP;
@@ -157,7 +173,8 @@ public:
Failed = true;
}
};
-}
+
+} // end anonymous namespace
/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
/// return the first token after the directive. The _Pragma token has just
@@ -264,7 +281,7 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
// Remove escaped quotes and escapes.
unsigned ResultPos = 1;
- for (unsigned i = 1, e = StrVal.size() - 1; i != e; ++i) {
+ for (size_t i = 1, e = StrVal.size() - 1; i != e; ++i) {
// Skip escapes. \\ -> '\' and \" -> '"'.
if (StrVal[i] == '\\' && i + 1 < e &&
(StrVal[i + 1] == '\\' || StrVal[i + 1] == '"'))
@@ -355,8 +372,10 @@ void Preprocessor::HandleMicrosoft__pragma(Token &Tok) {
///
void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
// Don't honor the 'once' when handling the primary source file, unless
- // this is a prefix to a TU, which indicates we're generating a PCH file.
- if (isInPrimaryFile() && TUKind != TU_Prefix) {
+ // this is a prefix to a TU, which indicates we're generating a PCH file, or
+ // when the main file is a header (e.g. when -xc-header is provided on the
+ // commandline).
+ if (isInPrimaryFile() && TUKind != TU_Prefix && !getLangOpts().IsHeaderFile) {
Diag(OnceTok, diag::pp_pragma_once_in_main_file);
return;
}
@@ -374,13 +393,12 @@ void Preprocessor::HandlePragmaMark() {
CurPTHLexer->DiscardToEndOfLine();
}
-
/// HandlePragmaPoison - Handle \#pragma GCC poison. PoisonTok is the 'poison'.
///
-void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
+void Preprocessor::HandlePragmaPoison() {
Token Tok;
- while (1) {
+ while (true) {
// Read the next token to poison. While doing this, pretend that we are
// skipping while reading the identifier to poison.
// This avoids errors on code like:
@@ -612,7 +630,7 @@ void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
// Pop PragmaPushMacroInfo stack.
iter->second.pop_back();
- if (iter->second.size() == 0)
+ if (iter->second.empty())
PragmaPushMacroInfo.erase(iter);
} else {
Diag(MessageLoc, diag::warn_pragma_pop_macro_no_push)
@@ -809,6 +827,7 @@ bool Preprocessor::LexOnOffSwitch(tok::OnOffSwitch &Result) {
}
namespace {
+
/// PragmaOnceHandler - "\#pragma once" marks the file as atomically included.
struct PragmaOnceHandler : public PragmaHandler {
PragmaOnceHandler() : PragmaHandler("once") {}
@@ -823,6 +842,7 @@ struct PragmaOnceHandler : public PragmaHandler {
/// rest of the line is not lexed.
struct PragmaMarkHandler : public PragmaHandler {
PragmaMarkHandler() : PragmaHandler("mark") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &MarkTok) override {
PP.HandlePragmaMark();
@@ -832,9 +852,10 @@ struct PragmaMarkHandler : public PragmaHandler {
/// PragmaPoisonHandler - "\#pragma poison x" marks x as not usable.
struct PragmaPoisonHandler : public PragmaHandler {
PragmaPoisonHandler() : PragmaHandler("poison") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &PoisonTok) override {
- PP.HandlePragmaPoison(PoisonTok);
+ PP.HandlePragmaPoison();
}
};
@@ -842,14 +863,17 @@ struct PragmaPoisonHandler : public PragmaHandler {
/// as a system header, which silences warnings in it.
struct PragmaSystemHeaderHandler : public PragmaHandler {
PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &SHToken) override {
PP.HandlePragmaSystemHeader(SHToken);
PP.CheckEndOfDirective("pragma");
}
};
+
struct PragmaDependencyHandler : public PragmaHandler {
PragmaDependencyHandler() : PragmaHandler("dependency") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &DepToken) override {
PP.HandlePragmaDependency(DepToken);
@@ -858,6 +882,7 @@ struct PragmaDependencyHandler : public PragmaHandler {
struct PragmaDebugHandler : public PragmaHandler {
PragmaDebugHandler() : PragmaHandler("__debug") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &DepToken) override {
Token Tok;
@@ -967,9 +992,11 @@ struct PragmaDebugHandler : public PragmaHandler {
struct PragmaDiagnosticHandler : public PragmaHandler {
private:
const char *Namespace;
+
public:
explicit PragmaDiagnosticHandler(const char *NS) :
PragmaHandler("diagnostic"), Namespace(NS) {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &DiagToken) override {
SourceLocation DiagLoc = DiagToken.getLocation();
@@ -1142,7 +1169,7 @@ struct PragmaWarningHandler : public PragmaHandler {
while (Tok.is(tok::numeric_constant)) {
uint64_t Value;
if (!PP.parseSimpleIntegerLiteral(Tok, Value) || Value == 0 ||
- Value > INT_MAX) {
+ Value > std::numeric_limits<int>::max()) {
PP.Diag(Tok, diag::warn_pragma_warning_expected_number);
return;
}
@@ -1267,17 +1294,18 @@ public:
/// macro on the top of the stack.
struct PragmaPushMacroHandler : public PragmaHandler {
PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &PushMacroTok) override {
PP.HandlePragmaPushMacro(PushMacroTok);
}
};
-
/// PragmaPopMacroHandler - "\#pragma pop_macro" sets the value of the
/// macro to the value on the top of the stack.
struct PragmaPopMacroHandler : public PragmaHandler {
PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &PopMacroTok) override {
PP.HandlePragmaPopMacro(PopMacroTok);
@@ -1289,6 +1317,7 @@ struct PragmaPopMacroHandler : public PragmaHandler {
/// PragmaSTDC_FENV_ACCESSHandler - "\#pragma STDC FENV_ACCESS ...".
struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &Tok) override {
tok::OnOffSwitch OOS;
@@ -1303,6 +1332,7 @@ struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
PragmaSTDC_CX_LIMITED_RANGEHandler()
: PragmaHandler("CX_LIMITED_RANGE") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &Tok) override {
tok::OnOffSwitch OOS;
@@ -1313,6 +1343,7 @@ struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
/// PragmaSTDC_UnknownHandler - "\#pragma STDC ...".
struct PragmaSTDC_UnknownHandler : public PragmaHandler {
PragmaSTDC_UnknownHandler() {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &UnknownTok) override {
// C99 6.10.6p2, unknown forms are not allowed.
@@ -1324,6 +1355,7 @@ struct PragmaSTDC_UnknownHandler : public PragmaHandler {
/// \#pragma clang arc_cf_code_audited begin/end
struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
PragmaARCCFCodeAuditedHandler() : PragmaHandler("arc_cf_code_audited") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &NameTok) override {
SourceLocation Loc = NameTok.getLocation();
@@ -1378,6 +1410,7 @@ struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
/// \#pragma clang assume_nonnull begin/end
struct PragmaAssumeNonNullHandler : public PragmaHandler {
PragmaAssumeNonNullHandler() : PragmaHandler("assume_nonnull") {}
+
void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
Token &NameTok) override {
SourceLocation Loc = NameTok.getLocation();
@@ -1451,8 +1484,7 @@ struct PragmaRegionHandler : public PragmaHandler {
}
};
-} // end anonymous namespace
-
+} // end anonymous namespace
/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
/// \#pragma GCC poison/system_header/dependency and \#pragma once.
diff --git a/lib/Lex/PreprocessingRecord.cpp b/lib/Lex/PreprocessingRecord.cpp
index 32e6de69f0db..13e15f3c943b 100644
--- a/lib/Lex/PreprocessingRecord.cpp
+++ b/lib/Lex/PreprocessingRecord.cpp
@@ -21,18 +21,13 @@ using namespace clang;
ExternalPreprocessingRecordSource::~ExternalPreprocessingRecordSource() { }
-
InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
- InclusionKind Kind,
- StringRef FileName,
+ InclusionKind Kind, StringRef FileName,
bool InQuotes, bool ImportedModule,
- const FileEntry *File,
- SourceRange Range)
- : PreprocessingDirective(InclusionDirectiveKind, Range),
- InQuotes(InQuotes), Kind(Kind), ImportedModule(ImportedModule), File(File)
-{
- char *Memory
- = (char*)PPRec.Allocate(FileName.size() + 1, llvm::alignOf<char>());
+ const FileEntry *File, SourceRange Range)
+ : PreprocessingDirective(InclusionDirectiveKind, Range), InQuotes(InQuotes),
+ Kind(Kind), ImportedModule(ImportedModule), File(File) {
+ char *Memory = (char *)PPRec.Allocate(FileName.size() + 1, alignof(char));
memcpy(Memory, FileName.data(), FileName.size());
Memory[FileName.size()] = 0;
this->FileName = StringRef(Memory, FileName.size());
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 78179dd7988d..0f7473b8c1ff 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -43,18 +43,27 @@
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Lex/ScratchBuffer.h"
-#include "llvm/ADT/APFloat.h"
-#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Capacity.h"
-#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <string>
#include <utility>
+#include <vector>
+
using namespace clang;
-template class llvm::Registry<clang::PragmaHandler>;
+LLVM_INSTANTIATE_REGISTRY(PragmaHandlerRegistry)
//===----------------------------------------------------------------------===//
ExternalPreprocessorSource::~ExternalPreprocessorSource() { }
@@ -74,11 +83,12 @@ Preprocessor::Preprocessor(IntrusiveRefCntPtr<PreprocessorOptions> PPOpts,
IncrementalProcessing(false), TUKind(TUKind), CodeComplete(nullptr),
CodeCompletionFile(nullptr), CodeCompletionOffset(0),
LastTokenWasAt(false), ModuleImportExpectsIdentifier(false),
- CodeCompletionReached(0), MainFileDir(nullptr),
- SkipMainFilePreamble(0, true), CurPPLexer(nullptr), CurDirLookup(nullptr),
- CurLexerKind(CLK_Lexer), CurSubmodule(nullptr), Callbacks(nullptr),
- CurSubmoduleState(&NullSubmoduleState), MacroArgCache(nullptr),
- Record(nullptr), MIChainHead(nullptr), DeserialMIChainHead(nullptr) {
+ CodeCompletionReached(false), CodeCompletionII(nullptr),
+ MainFileDir(nullptr), SkipMainFilePreamble(0, true), CurPPLexer(nullptr),
+ CurDirLookup(nullptr), CurLexerKind(CLK_Lexer), CurSubmodule(nullptr),
+ Callbacks(nullptr), CurSubmoduleState(&NullSubmoduleState),
+ MacroArgCache(nullptr), Record(nullptr), MIChainHead(nullptr),
+ DeserialMIChainHead(nullptr) {
OwnsHeaderSearch = OwnsHeaders;
CounterValue = 0; // __COUNTER__ starts at 0.
@@ -480,7 +490,7 @@ void Preprocessor::CreateString(StringRef Str, Token &Tok,
}
Module *Preprocessor::getCurrentModule() {
- if (!getLangOpts().CompilingModule)
+ if (!getLangOpts().isCompilingModule())
return nullptr;
return getHeaderSearchInfo().lookupModule(getLangOpts().CurrentModule);
@@ -490,7 +500,6 @@ Module *Preprocessor::getCurrentModule() {
// Preprocessor Initialization Methods
//===----------------------------------------------------------------------===//
-
/// EnterMainSourceFile - Enter the specified FileID as the main source file,
/// which implicitly adds the builtin defines etc.
void Preprocessor::EnterMainSourceFile() {
@@ -618,6 +627,11 @@ static diag::kind getFutureCompatDiagKind(const IdentifierInfo &II,
"Keyword not known to come from a newer Standard or proposed Standard");
}
+void Preprocessor::updateOutOfDateIdentifier(IdentifierInfo &II) const {
+ assert(II.isOutOfDate() && "not out of date");
+ getExternalSource()->updateOutOfDateIdentifier(II);
+}
+
/// HandleIdentifier - This callback is invoked when the lexer reads an
/// identifier. This callback looks up the identifier in the map and/or
/// potentially macro expands it or turns it into a named token (like 'for').
@@ -642,7 +656,7 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
if (&II == Ident__VA_ARGS__)
CurrentIsPoisoned = Ident__VA_ARGS__->isPoisoned();
- ExternalSource->updateOutOfDateIdentifier(II);
+ updateOutOfDateIdentifier(II);
Identifier.setKind(II.getTokenID());
if (&II == Ident__VA_ARGS__)
@@ -707,9 +721,12 @@ bool Preprocessor::HandleIdentifier(Token &Identifier) {
// Note that we do not treat 'import' as a contextual
// keyword when we're in a caching lexer, because caching lexers only get
// used in contexts where import declarations are disallowed.
- if (LastTokenWasAt && II.isModulesImport() && !InMacroArgs &&
- !DisableMacroExpansion &&
- (getLangOpts().Modules || getLangOpts().DebuggerSupport) &&
+ //
+ // Likewise if this is the C++ Modules TS import keyword.
+ if (((LastTokenWasAt && II.isModulesImport()) ||
+ Identifier.is(tok::kw_import)) &&
+ !InMacroArgs && !DisableMacroExpansion &&
+ (getLangOpts().Modules || getLangOpts().DebuggerSupport) &&
CurLexerKind != CLK_CachingLexer) {
ModuleImportLoc = Identifier.getLocation();
ModuleImportPath.clear();
@@ -744,10 +761,12 @@ void Preprocessor::Lex(Token &Result) {
}
} while (!ReturnedToken);
+ if (Result.is(tok::code_completion))
+ setCodeCompletionIdentifierInfo(Result.getIdentifierInfo());
+
LastTokenWasAt = Result.is(tok::at);
}
-
/// \brief Lex a token following the 'import' contextual keyword.
///
void Preprocessor::LexAfterModuleImport(Token &Result) {
@@ -774,7 +793,8 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
}
// If we're expecting a '.' or a ';', and we got a '.', then wait until we
- // see the next identifier.
+ // see the next identifier. (We can also see a '[[' that begins an
+ // attribute-specifier-seq here under the C++ Modules TS.)
if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
ModuleImportExpectsIdentifier = true;
CurLexerKind = CLK_LexAfterModuleImport;
@@ -783,6 +803,23 @@ void Preprocessor::LexAfterModuleImport(Token &Result) {
// If we have a non-empty module path, load the named module.
if (!ModuleImportPath.empty()) {
+ // Under the Modules TS, the dot is just part of the module name, and not
+ // a real hierarchy separator. Flatten such module names now.
+ //
+ // FIXME: Is this the right level to be performing this transformation?
+ std::string FlatModuleName;
+ if (getLangOpts().ModulesTS) {
+ for (auto &Piece : ModuleImportPath) {
+ if (!FlatModuleName.empty())
+ FlatModuleName += ".";
+ FlatModuleName += Piece.first->getName();
+ }
+ SourceLocation FirstPathLoc = ModuleImportPath[0].second;
+ ModuleImportPath.clear();
+ ModuleImportPath.push_back(
+ std::make_pair(getIdentifierInfo(FlatModuleName), FirstPathLoc));
+ }
+
Module *Imported = nullptr;
if (getLangOpts().Modules) {
Imported = TheModuleLoader.loadModule(ModuleImportLoc,
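The flattening above joins the dotted path components back into one name before lookup. A standalone sketch of the same join, with plain strings standing in for the IdentifierInfo/SourceLocation pairs:

#include <string>
#include <vector>

// Join path pieces with '.', as LexAfterModuleImport does for Modules TS
// names: {"std", "io"} -> "std.io".
std::string flattenModuleName(const std::vector<std::string> &Pieces) {
  std::string Flat;
  for (const std::string &Piece : Pieces) {
    if (!Flat.empty())
      Flat += '.';
    Flat += Piece;
  }
  return Flat;
}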
diff --git a/lib/Lex/TokenConcatenation.cpp b/lib/Lex/TokenConcatenation.cpp
index 27b4eab1a28d..d1facd9c6879 100644
--- a/lib/Lex/TokenConcatenation.cpp
+++ b/lib/Lex/TokenConcatenation.cpp
@@ -232,7 +232,7 @@ bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
// it as an identifier.
if (!PrevTok.hasUDSuffix())
return false;
- // FALL THROUGH.
+ LLVM_FALLTHROUGH;
case tok::identifier: // id+id or id+number or id+L"foo".
// id+'.'... will not append.
if (Tok.is(tok::numeric_constant))
diff --git a/lib/Lex/TokenLexer.cpp b/lib/Lex/TokenLexer.cpp
index 994bae632aec..a53c8014ebaf 100644
--- a/lib/Lex/TokenLexer.cpp
+++ b/lib/Lex/TokenLexer.cpp
@@ -275,7 +275,7 @@ void TokenLexer::ExpandFunctionArguments() {
// If the arg token expanded into anything, append it.
if (ResultArgToks->isNot(tok::eof)) {
- unsigned FirstResult = ResultToks.size();
+ size_t FirstResult = ResultToks.size();
unsigned NumToks = MacroArgs::getArgLength(ResultArgToks);
ResultToks.append(ResultArgToks, ResultArgToks+NumToks);
@@ -289,8 +289,8 @@ void TokenLexer::ExpandFunctionArguments() {
// If the '##' came from expanding an argument, turn it into 'unknown'
// to avoid pasting.
- for (unsigned i = FirstResult, e = ResultToks.size(); i != e; ++i) {
- Token &Tok = ResultToks[i];
+ for (Token &Tok : llvm::make_range(ResultToks.begin() + FirstResult,
+ ResultToks.end())) {
if (Tok.is(tok::hashhash))
Tok.setKind(tok::unknown);
}
@@ -333,9 +333,8 @@ void TokenLexer::ExpandFunctionArguments() {
// If the '##' came from expanding an argument, turn it into 'unknown'
// to avoid pasting.
- for (unsigned i = ResultToks.size() - NumToks, e = ResultToks.size();
- i != e; ++i) {
- Token &Tok = ResultToks[i];
+ for (Token &Tok : llvm::make_range(ResultToks.end() - NumToks,
+ ResultToks.end())) {
if (Tok.is(tok::hashhash))
Tok.setKind(tok::unknown);
}
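llvm::make_range turns an iterator pair into something range-for can consume; above it replaces index arithmetic over the freshly appended tail of ResultToks. A minimal sketch:

#include "llvm/ADT/iterator_range.h" // for llvm::make_range
#include <vector>

// Visit only the last N elements, as the hash-hash fixups above do.
// Assumes N <= Toks.size().
void markTail(std::vector<int> &Toks, size_t N) {
  for (int &Tok : llvm::make_range(Toks.end() - N, Toks.end()))
    Tok = 0;
}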
diff --git a/lib/Parse/ParseAST.cpp b/lib/Parse/ParseAST.cpp
index 1fb57a08c433..d018d4c08ed9 100644
--- a/lib/Parse/ParseAST.cpp
+++ b/lib/Parse/ParseAST.cpp
@@ -138,26 +138,18 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
S.getPreprocessor().EnterMainSourceFile();
P.Initialize();
- // C11 6.9p1 says translation units must have at least one top-level
- // declaration. C++ doesn't have this restriction. We also don't want to
- // complain if we have a precompiled header, although technically if the PCH
- // is empty we should still emit the (pedantic) diagnostic.
Parser::DeclGroupPtrTy ADecl;
ExternalASTSource *External = S.getASTContext().getExternalSource();
if (External)
External->StartTranslationUnit(Consumer);
- if (P.ParseTopLevelDecl(ADecl)) {
- if (!External && !S.getLangOpts().CPlusPlus)
- P.Diag(diag::ext_empty_translation_unit);
- } else {
- do {
- // If we got a null return and something *was* parsed, ignore it. This
- // is due to a top-level semicolon, an action override, or a parse error
- // skipping something.
- if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
- return;
- } while (!P.ParseTopLevelDecl(ADecl));
+ for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
+ AtEOF = P.ParseTopLevelDecl(ADecl)) {
+ // If we got a null return and something *was* parsed, ignore it. This
+ // is due to a top-level semicolon, an action override, or a parse error
+ // skipping something.
+ if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
+ return;
}
// Process any TopLevelDecls generated by #pragma weak.
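The new loop folds the "first declaration" special case into the for-initializer, so the EOF test and the per-decl handling are each written once. The control-flow shape as a standalone sketch, with hypothetical stand-ins for the two parse entry points:

#include <cstdio>

static int Remaining = 3;

// Stand-ins for ParseFirstTopLevelDecl / ParseTopLevelDecl: produce a
// "decl" and report whether end-of-file was reached.
static bool parseFirst(int &Decl) { Decl = Remaining--; return false; }
static bool parseNext(int &Decl)  { Decl = Remaining--; return Remaining < 0; }

int main() {
  int Decl = 0;
  for (bool AtEOF = parseFirst(Decl); !AtEOF; AtEOF = parseNext(Decl))
    std::printf("handled decl %d\n", Decl); // runs once per decl
}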
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index 39fcc8270419..c52b61e7e983 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -319,7 +319,8 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
// Introduce the parameter into scope.
bool HasUnparsed = Param->hasUnparsedDefaultArg();
Actions.ActOnDelayedCXXMethodParameter(getCurScope(), Param);
- if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
+ std::unique_ptr<CachedTokens> Toks = std::move(LM.DefaultArgs[I].Toks);
+ if (Toks) {
// Mark the end of the default argument so that we know when to stop when
// we parse it later on.
Token LastDefaultArgToken = Toks->back();
@@ -377,9 +378,6 @@ void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
if (Tok.is(tok::eof) && Tok.getEofData() == Param)
ConsumeAnyToken();
-
- delete Toks;
- LM.DefaultArgs[I].Toks = nullptr;
} else if (HasUnparsed) {
assert(Param->hasInheritedDefaultArg());
FunctionDecl *Old = cast<FunctionDecl>(LM.Method)->getPreviousDecl();
@@ -832,22 +830,30 @@ bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
}
}
- if (Tok.isOneOf(tok::identifier, tok::kw_template)) {
+ if (Tok.is(tok::identifier)) {
Toks.push_back(Tok);
ConsumeToken();
- } else if (Tok.is(tok::code_completion)) {
- Toks.push_back(Tok);
- ConsumeCodeCompletionToken();
- // Consume the rest of the initializers permissively.
- // FIXME: We should be able to perform code-completion here even if
- // there isn't a subsequent '{' token.
- MightBeTemplateArgument = true;
- break;
} else {
break;
}
} while (Tok.is(tok::coloncolon));
+ if (Tok.is(tok::code_completion)) {
+ Toks.push_back(Tok);
+ ConsumeCodeCompletionToken();
+ if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype)) {
+ // Could be the start of another member initializer (the ',' has not
+ // been written yet)
+ continue;
+ }
+ }
+
+ if (Tok.is(tok::comma)) {
+ // The initialization is missing, we'll diagnose it later.
+ Toks.push_back(Tok);
+ ConsumeToken();
+ continue;
+ }
if (Tok.is(tok::less))
MightBeTemplateArgument = true;
@@ -888,6 +894,26 @@ bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
// means the initializer is malformed; we'll diagnose it later.
if (!getLangOpts().CPlusPlus11)
return false;
+
+ const Token &PreviousToken = Toks[Toks.size() - 2];
+ if (!MightBeTemplateArgument &&
+ !PreviousToken.isOneOf(tok::identifier, tok::greater,
+ tok::greatergreater)) {
+ // If the opening brace is not preceded by one of these tokens, we are
+ // missing the mem-initializer-id. In order to recover better, we need
+ // to use heuristics to determine if this '{' is most likely the
+ // beginning of a brace-init-list or the function body.
+ // Check the token after the corresponding '}'.
+ TentativeParsingAction PA(*this);
+ if (SkipUntil(tok::r_brace) &&
+ !Tok.isOneOf(tok::comma, tok::ellipsis, tok::l_brace)) {
+ // Consider there was a malformed initializer and this is the start
+ // of the function body. We'll diagnose it later.
+ PA.Revert();
+ return false;
+ }
+ PA.Revert();
+ }
}
// Grab the initializer (or the subexpression of the template argument).
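TentativeParsingAction is a save/restore guard over the token stream: look ahead as far as needed, then revert to put every consumed token back. A sketch of the pattern with a hypothetical cursor-based lexer (not the real Parser API):

#include <cstddef>
#include <vector>

struct Lexer {
  std::vector<char> Toks;
  size_t Pos = 0;
  char peek() const { return Pos < Toks.size() ? Toks[Pos] : 0; }
  void consume() { ++Pos; }
};

// Mirrors TentativeParsingAction: remember the position, explore, revert.
struct TentativeAction {
  Lexer &L;
  size_t Saved;
  explicit TentativeAction(Lexer &L) : L(L), Saved(L.Pos) {}
  void revert() { L.Pos = Saved; }
};

// Decide whether a '{' opens a braced init or a function body by peeking
// past the matching '}' -- the shape of the heuristic above. Assumes the
// cursor sits on the opening '{'.
bool looksLikeFunctionBody(Lexer &L) {
  TentativeAction PA(L);
  int Depth = 0;
  do {
    if (L.peek() == '{') ++Depth;
    if (L.peek() == '}') --Depth;
    L.consume();
  } while (Depth > 0 && L.peek());
  bool Result = L.peek() != ',' && L.peek() != '{';
  PA.revert();
  return Result;
}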
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 45e1c3e465ce..ad4005747310 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -25,6 +25,7 @@
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
@@ -301,10 +302,10 @@ unsigned Parser::ParseAttributeArgsCommon(
// Parse the non-empty comma-separated list of expressions.
do {
- std::unique_ptr<EnterExpressionEvaluationContext> Unevaluated;
- if (attributeParsedArgsUnevaluated(*AttrName))
- Unevaluated.reset(
- new EnterExpressionEvaluationContext(Actions, Sema::Unevaluated));
+ bool ShouldEnter = attributeParsedArgsUnevaluated(*AttrName);
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Sema::Unevaluated, /*LambdaContextDecl=*/nullptr,
+ /*IsDecltype=*/false, ShouldEnter);
ExprResult ArgExpr(
Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
@@ -366,13 +367,13 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
// These may refer to the function arguments, but need to be parsed early to
// participate in determining whether it's a redeclaration.
- std::unique_ptr<ParseScope> PrototypeScope;
+ llvm::Optional<ParseScope> PrototypeScope;
if (normalizeAttrName(AttrName->getName()) == "enable_if" &&
D && D->isFunctionDeclarator()) {
DeclaratorChunk::FunctionTypeInfo FTI = D->getFunctionTypeInfo();
- PrototypeScope.reset(new ParseScope(this, Scope::FunctionPrototypeScope |
- Scope::FunctionDeclarationScope |
- Scope::DeclScope));
+ PrototypeScope.emplace(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope);
for (unsigned i = 0; i != FTI.NumParams; ++i) {
ParmVarDecl *Param = cast<ParmVarDecl>(FTI.Params[i].Param);
Actions.ActOnReenterCXXMethodParameter(getCurScope(), Param);
@@ -605,6 +606,7 @@ void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
case tok::kw___fastcall:
case tok::kw___stdcall:
case tok::kw___thiscall:
+ case tok::kw___regcall:
case tok::kw___cdecl:
case tok::kw___vectorcall:
case tok::kw___ptr64:
@@ -1407,39 +1409,53 @@ void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs) {
<< attrs.Range;
}
-void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &attrs) {
- AttributeList *AttrList = attrs.getList();
- while (AttrList) {
- if (AttrList->isCXX11Attribute()) {
- Diag(AttrList->getLoc(), diag::err_attribute_not_type_attr)
- << AttrList->getName();
- AttrList->setInvalid();
+void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
+ unsigned DiagID) {
+ for (AttributeList *Attr = Attrs.getList(); Attr; Attr = Attr->getNext()) {
+ if (!Attr->isCXX11Attribute())
+ continue;
+ if (Attr->getKind() == AttributeList::UnknownAttribute)
+ Diag(Attr->getLoc(), diag::warn_unknown_attribute_ignored)
+ << Attr->getName();
+ else {
+ Diag(Attr->getLoc(), DiagID)
+ << Attr->getName();
+ Attr->setInvalid();
}
- AttrList = AttrList->getNext();
}
}
+// Usually, `__attribute__((attrib)) class Foo {} var` means that the attribute
+// applies to var, not the type Foo.
// As an exception to the rule, __declspec(align(...)) before the
// class-key affects the type instead of the variable.
-void Parser::handleDeclspecAlignBeforeClassKey(ParsedAttributesWithRange &Attrs,
- DeclSpec &DS,
- Sema::TagUseKind TUK) {
+// Also, Microsoft-style [attributes] seem to affect the type instead of the
+// variable.
+// This function moves attributes that should apply to the type off DS to Attrs.
+void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
+ DeclSpec &DS,
+ Sema::TagUseKind TUK) {
if (TUK == Sema::TUK_Reference)
return;
ParsedAttributes &PA = DS.getAttributes();
AttributeList *AL = PA.getList();
AttributeList *Prev = nullptr;
+ AttributeList *TypeAttrHead = nullptr;
+ AttributeList *TypeAttrTail = nullptr;
while (AL) {
AttributeList *Next = AL->getNext();
- // We only consider attributes using the appropriate '__declspec' spelling.
- // This behavior doesn't extend to any other spellings.
- if (AL->getKind() == AttributeList::AT_Aligned &&
- AL->isDeclspecAttribute()) {
+ if ((AL->getKind() == AttributeList::AT_Aligned &&
+ AL->isDeclspecAttribute()) ||
+ AL->isMicrosoftAttribute()) {
// Stitch the attribute into the tag's attribute list.
- AL->setNext(nullptr);
- Attrs.add(AL);
+ if (TypeAttrTail)
+ TypeAttrTail->setNext(AL);
+ else
+ TypeAttrHead = AL;
+ TypeAttrTail = AL;
+ TypeAttrTail->setNext(nullptr);
// Remove the attribute from the variable's attribute list.
if (Prev) {
@@ -1457,6 +1473,12 @@ void Parser::handleDeclspecAlignBeforeClassKey(ParsedAttributesWithRange &Attrs,
AL = Next;
}
+
+ // Find end of type attributes Attrs and add NewTypeAttributes in the same
+ // order they were in originally. (Remember, in AttributeList things earlier
+ // in source order are later in the list, since new attributes are added to
+ // the front of the list.)
+ Attrs.addAllAtEnd(TypeAttrHead);
}
/// ParseDeclaration - Parse a full 'declaration', which consists of
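The loop above partitions an intrusive singly linked list in one pass, keeping relative order by appending to a tail pointer. The same pattern on a minimal node type (hypothetical, not AttributeList):

struct Node {
  bool IsTypeAttr;
  Node *Next = nullptr;
};

// Unlink every node with IsTypeAttr from List (preserving order) and
// return the unlinked sublist -- the shape of stripTypeAttributesOffDeclSpec.
Node *extractTypeAttrs(Node *&List) {
  Node *Head = nullptr, *Tail = nullptr, *Prev = nullptr;
  for (Node *N = List, *NextN = nullptr; N; N = NextN) {
    NextN = N->Next;
    if (!N->IsTypeAttr) {
      Prev = N;
      continue;
    }
    // Unlink from the source list.
    (Prev ? Prev->Next : List) = NextN;
    // Append to the extracted list, keeping original order.
    (Tail ? Tail->Next : Head) = N;
    Tail = N;
    N->Next = nullptr;
  }
  return Head;
}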
@@ -1484,7 +1506,6 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
ObjCDeclContextSwitch ObjCDC(*this);
Decl *SingleDecl = nullptr;
- Decl *OwnedType = nullptr;
switch (Tok.getKind()) {
case tok::kw_template:
case tok::kw_export:
@@ -1504,9 +1525,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
ProhibitAttributes(attrs);
return ParseNamespace(Context, DeclEnd);
case tok::kw_using:
- SingleDecl = ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
- DeclEnd, attrs, &OwnedType);
- break;
+ return ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
+ DeclEnd, attrs);
case tok::kw_static_assert:
case tok::kw__Static_assert:
ProhibitAttributes(attrs);
@@ -1517,9 +1537,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclaration(unsigned Context,
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
- // single decl, convert it now. Alias declarations can also declare a type;
- // include that too if it is present.
- return Actions.ConvertDeclToDeclGroup(SingleDecl, OwnedType);
+ // single decl, convert it now.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl);
}
/// simple-declaration: [C99 6.7: declaration] [C++ 7p1: dcl.dcl]
@@ -2717,7 +2736,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Reject C++11 attributes that appertain to decl specifiers as
// we don't support any C++11 attributes that appertain to decl
// specifiers. This also conforms to what g++ 4.8 is doing.
- ProhibitCXX11Attributes(attrs);
+ ProhibitCXX11Attributes(attrs, diag::err_attribute_not_type_attr);
DS.takeAttributesFrom(attrs);
}
@@ -3117,6 +3136,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___regcall:
case tok::kw___vectorcall:
ParseMicrosoftTypeAttributes(DS.getAttributes());
continue;
@@ -4067,7 +4087,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
- handleDeclspecAlignBeforeClassKey(attrs, DS, TUK);
+ stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
Sema::SkipBodyInfo SkipBody;
if (!Name && TUK == Sema::TUK_Definition && Tok.is(tok::l_brace) &&
@@ -4169,7 +4189,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// C does not allow an empty enumerator-list, C++ does [dcl.enum].
if (Tok.is(tok::r_brace) && !getLangOpts().CPlusPlus)
- Diag(Tok, diag::error_empty_enum);
+ Diag(Tok, diag::err_empty_enum);
SmallVector<Decl *, 32> EnumConstantDecls;
SmallVector<SuppressAccessChecks, 32> EnumAvailabilityDiags;
@@ -4224,7 +4244,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
if (Tok.is(tok::identifier)) {
// We're missing a comma between enumerators.
- SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
+ SourceLocation Loc = getEndOfPreviousToken();
Diag(Loc, diag::err_enumerator_list_missing_comma)
<< FixItHint::CreateInsertion(Loc, ", ");
continue;
@@ -4434,6 +4454,7 @@ bool Parser::isTypeSpecifierQualifier() {
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___regcall:
case tok::kw___vectorcall:
case tok::kw___w64:
case tok::kw___ptr64:
@@ -4618,6 +4639,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___regcall:
case tok::kw___vectorcall:
case tok::kw___w64:
case tok::kw___sptr:
@@ -4856,6 +4878,7 @@ void Parser::ParseTypeQualifierListOpt(DeclSpec &DS, unsigned AttrReqs,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___regcall:
case tok::kw___vectorcall:
if (AttrReqs & AR_DeclspecAttributesParsed) {
ParseMicrosoftTypeAttributes(DS.getAttributes());
@@ -5200,12 +5223,22 @@ static SourceLocation getMissingDeclaratorIdLoc(Declarator &D,
/// '~' class-name
/// template-id
///
+/// C++17 adds the following, which we also handle here:
+///
+/// simple-declaration:
+/// <decl-spec> '[' identifier-list ']' brace-or-equal-initializer ';'
+///
/// Note, any additional constructs added here may need corresponding changes
/// in isConstructorDeclarator.
void Parser::ParseDirectDeclarator(Declarator &D) {
DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
if (getLangOpts().CPlusPlus && D.mayHaveIdentifier()) {
+ // This might be a C++17 structured binding.
+ if (Tok.is(tok::l_square) && !D.mayOmitIdentifier() &&
+ D.getCXXScopeSpec().isEmpty())
+ return ParseDecompositionDeclarator(D);
+
// Don't parse FOO:BAR as if it were a typo for FOO::BAR inside a class, in
// this context it is a bitfield. Also in range-based for statement colon
// may delimit for-range-declaration.
@@ -5228,6 +5261,14 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
// Change the declaration context for name lookup, until this function
// is exited (and the declarator has been parsed).
DeclScopeObj.EnterDeclaratorScope();
+ else if (getObjCDeclContext()) {
+ // Ensure that we don't interpret the next token as an identifier when
+ // dealing with declarations in an Objective-C container.
+ D.SetIdentifier(nullptr, Tok.getLocation());
+ D.setInvalidType(true);
+ ConsumeToken();
+ goto PastIdentifier;
+ }
}
// C++0x [dcl.fct]p14:
@@ -5435,6 +5476,70 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
}
}
+void Parser::ParseDecompositionDeclarator(Declarator &D) {
+ assert(Tok.is(tok::l_square));
+
+ // If this doesn't look like a structured binding, maybe it's a misplaced
+ // array declarator.
+ // FIXME: Consume the l_square first so we don't need extra lookahead for
+ // this.
+ if (!(NextToken().is(tok::identifier) &&
+ GetLookAheadToken(2).isOneOf(tok::comma, tok::r_square)) &&
+ !(NextToken().is(tok::r_square) &&
+ GetLookAheadToken(2).isOneOf(tok::equal, tok::l_brace)))
+ return ParseMisplacedBracketDeclarator(D);
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ SmallVector<DecompositionDeclarator::Binding, 32> Bindings;
+ while (Tok.isNot(tok::r_square)) {
+ if (!Bindings.empty()) {
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else {
+ if (Tok.is(tok::identifier)) {
+ SourceLocation EndLoc = getEndOfPreviousToken();
+ Diag(EndLoc, diag::err_expected)
+ << tok::comma << FixItHint::CreateInsertion(EndLoc, ",");
+ } else {
+ Diag(Tok, diag::err_expected_comma_or_rsquare);
+ }
+
+ SkipUntil(tok::r_square, tok::comma, tok::identifier,
+ StopAtSemi | StopBeforeMatch);
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else if (Tok.isNot(tok::identifier))
+ break;
+ }
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected) << tok::identifier;
+ break;
+ }
+
+ Bindings.push_back({Tok.getIdentifierInfo(), Tok.getLocation()});
+ ConsumeToken();
+ }
+
+ if (Tok.isNot(tok::r_square))
+ // We've already diagnosed a problem here.
+ T.skipToEnd();
+ else {
+ // C++17 does not allow the identifier-list in a structured binding
+ // to be empty.
+ if (Bindings.empty())
+ Diag(Tok.getLocation(), diag::ext_decomp_decl_empty);
+
+ T.consumeClose();
+ }
+
+ return D.setDecompositionBindings(T.getOpenLocation(), Bindings,
+ T.getCloseLocation());
+}
+
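For reference, the C++17 construct ParseDecompositionDeclarator handles, including the recovery paths it diagnoses, looks like this (sketch; names illustrative):

    struct Point { int x, y; };
    Point p{1, 2};
    auto [x, y] = p;      // well-formed structured binding
    auto &[a, b] = p;     // bindings may be declared as references
    // auto [x y] = p;    // missing ',': err_expected with a fix-it inserting ","
    // auto [] = p;       // empty identifier-list: ext_decomp_decl_empty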
/// ParseParenDeclarator - We parsed the declarator D up to a paren. This is
/// only called before the identifier, so these are most likely just grouping
/// parens for precedence. If we find that these are actually function
@@ -5719,6 +5824,21 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
}
}
+ // Collect non-parameter declarations from the prototype if this is a function
+ // declaration. They will be moved into the scope of the function. Only do
+ // this in C and not C++, where the decls will continue to live in the
+ // surrounding context.
+ SmallVector<NamedDecl *, 0> DeclsInPrototype;
+ if (getCurScope()->getFlags() & Scope::FunctionDeclarationScope &&
+ !getLangOpts().CPlusPlus) {
+ for (Decl *D : getCurScope()->decls()) {
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ if (!ND || isa<ParmVarDecl>(ND))
+ continue;
+ DeclsInPrototype.push_back(ND);
+ }
+ }
+
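A small C illustration of what this block collects (assumed example, not from the patch): a type declared inside a prototype is visible only within the function in C, so its declaration is gathered here for Sema to move into the function's scope.

    /* C only: 'enum Mode' is declared in the prototype scope; it lands in
       DeclsInPrototype so Sema can re-home it into f's body scope. */
    void f(enum Mode { ON, OFF } m) { enum Mode x = ON; (void)m; (void)x; }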
// Remember that we parsed a function type, and remember the attributes.
D.AddTypeInfo(DeclaratorChunk::getFunction(HasProto,
IsAmbiguous,
@@ -5738,6 +5858,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
NoexceptExpr.isUsable() ?
NoexceptExpr.get() : nullptr,
ExceptionSpecTokens,
+ DeclsInPrototype,
StartLoc, LocalEndLoc, D,
TrailingReturnType),
FnAttrs, EndLoc);
@@ -5783,7 +5904,8 @@ bool Parser::isFunctionDeclaratorIdentifierList() {
// To handle this, we check to see if the token after the first
// identifier is a "," or ")". Only then do we parse it as an
// identifier list.
- && (NextToken().is(tok::comma) || NextToken().is(tok::r_paren));
+ && (!Tok.is(tok::eof) &&
+ (NextToken().is(tok::comma) || NextToken().is(tok::r_paren)));
}
/// ParseFunctionDeclaratorIdentifierList - While parsing a function declarator
@@ -5921,7 +6043,7 @@ void Parser::ParseParameterDeclarationClause(
// DefArgToks is used when the parsing of default arguments needs
// to be delayed.
- CachedTokens *DefArgToks = nullptr;
+ std::unique_ptr<CachedTokens> DefArgToks;
// If no parameter was specified, verify that *something* was specified,
// otherwise we have a missing type and identifier.
@@ -5957,13 +6079,11 @@ void Parser::ParseParameterDeclarationClause(
// If we're inside a class definition, cache the tokens
// corresponding to the default argument. We'll actually parse
// them when we see the end of the class definition.
- // FIXME: Can we use a smart pointer for Toks?
- DefArgToks = new CachedTokens;
+ DefArgToks.reset(new CachedTokens);
SourceLocation ArgStartLoc = NextToken().getLocation();
if (!ConsumeAndStoreInitializer(*DefArgToks, CIK_DefaultArgument)) {
- delete DefArgToks;
- DefArgToks = nullptr;
+ DefArgToks.reset();
Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
} else {
Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
@@ -5999,7 +6119,7 @@ void Parser::ParseParameterDeclarationClause(
ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
ParmDeclarator.getIdentifierLoc(),
- Param, DefArgToks));
+ Param, std::move(DefArgToks)));
}
if (TryConsumeToken(tok::ellipsis, EllipsisLoc)) {
@@ -6149,8 +6269,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
T.consumeClose();
- ParsedAttributes attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
+ MaybeParseCXX11Attributes(DS.getAttributes());
// Remember that we parsed an array type, and remember its features.
D.AddTypeInfo(DeclaratorChunk::getArray(DS.getTypeQualifiers(),
@@ -6158,7 +6277,7 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
NumElements.get(),
T.getOpenLocation(),
T.getCloseLocation()),
- attrs, T.getCloseLocation());
+ DS.getAttributes(), T.getCloseLocation());
}
/// Diagnose brackets before an identifier.
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index 6436e3dfc763..4002b09d2bc4 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -217,7 +217,6 @@ void Parser::ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc,
Tok.isNot(tok::eof)) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftAttributes(attrs);
ParseExternalDeclaration(attrs);
}
@@ -310,7 +309,6 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftAttributes(attrs);
if (Tok.isNot(tok::l_brace)) {
// Reset the source range in DS, as the leading "extern"
@@ -361,7 +359,6 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
default:
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftAttributes(attrs);
ParseExternalDeclaration(attrs);
continue;
}
@@ -375,13 +372,60 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
: nullptr;
}
+/// Parse a C++ Modules TS export-declaration.
+///
+/// export-declaration:
+/// 'export' declaration
+/// 'export' '{' declaration-seq[opt] '}'
+///
+Decl *Parser::ParseExportDeclaration() {
+ assert(Tok.is(tok::kw_export));
+ SourceLocation ExportLoc = ConsumeToken();
+
+ ParseScope ExportScope(this, Scope::DeclScope);
+ Decl *ExportDecl = Actions.ActOnStartExportDecl(
+ getCurScope(), ExportLoc,
+ Tok.is(tok::l_brace) ? Tok.getLocation() : SourceLocation());
+
+ if (Tok.isNot(tok::l_brace)) {
+ // FIXME: Factor out a ParseExternalDeclarationWithAttrs.
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ MaybeParseMicrosoftAttributes(Attrs);
+ ParseExternalDeclaration(Attrs);
+ return Actions.ActOnFinishExportDecl(getCurScope(), ExportDecl,
+ SourceLocation());
+ }
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+
+ // The Modules TS draft says "An export-declaration shall declare at least one
+ // entity", but the intent is that it shall contain at least one declaration.
+ if (Tok.is(tok::r_brace))
+ Diag(ExportLoc, diag::err_export_empty)
+ << SourceRange(ExportLoc, Tok.getLocation());
+
+ while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
+ Tok.isNot(tok::eof)) {
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ MaybeParseMicrosoftAttributes(Attrs);
+ ParseExternalDeclaration(Attrs);
+ }
+
+ T.consumeClose();
+ return Actions.ActOnFinishExportDecl(getCurScope(), ExportDecl,
+ T.getCloseLocation());
+}
+
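Both forms of the Modules TS export-declaration recognized above, in an illustrative interface unit (entity names assumed):

    // Inside a module interface unit (module-declaration omitted):
    export int area(int w, int h);   // 'export' declaration
    export {                         // 'export' '{' declaration-seq[opt] '}'
      struct Widget { int id; };
      Widget make();
    }
    // export { }                    // diagnosed via err_export_empty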
/// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or
/// using-directive. Assumes that current token is 'using'.
-Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
+Parser::DeclGroupPtrTy
+Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
const ParsedTemplateInfo &TemplateInfo,
- SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs,
- Decl **OwnedType) {
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs) {
assert(Tok.is(tok::kw_using) && "Not using token");
ObjCDeclContextSwitch ObjCDC(*this);
@@ -403,7 +447,8 @@ Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
<< 0 /* directive */ << R << FixItHint::CreateRemoval(R);
}
- return ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
+ Decl *UsingDir = ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
+ return Actions.ConvertDeclToDeclGroup(UsingDir);
}
// Otherwise, it must be a using-declaration or an alias-declaration.
@@ -412,7 +457,7 @@ Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
ProhibitAttributes(attrs);
return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd,
- AS_none, OwnedType);
+ AS_none);
}
/// ParseUsingDirective - Parse C++ using-directive, assumes
@@ -478,58 +523,31 @@ Decl *Parser::ParseUsingDirective(unsigned Context,
IdentLoc, NamespcName, attrs.getList());
}
-/// ParseUsingDeclaration - Parse C++ using-declaration or alias-declaration.
-/// Assumes that 'using' was already seen.
-///
-/// using-declaration: [C++ 7.3.p3: namespace.udecl]
-/// 'using' 'typename'[opt] ::[opt] nested-name-specifier
-/// unqualified-id
-/// 'using' :: unqualified-id
+/// Parse a using-declarator (or the identifier in a C++11 alias-declaration).
///
-/// alias-declaration: C++11 [dcl.dcl]p1
-/// 'using' identifier attribute-specifier-seq[opt] = type-id ;
+/// using-declarator:
+/// 'typename'[opt] nested-name-specifier unqualified-id
///
-Decl *Parser::ParseUsingDeclaration(unsigned Context,
- const ParsedTemplateInfo &TemplateInfo,
- SourceLocation UsingLoc,
- SourceLocation &DeclEnd,
- AccessSpecifier AS,
- Decl **OwnedType) {
- CXXScopeSpec SS;
- SourceLocation TypenameLoc;
- bool HasTypenameKeyword = false;
-
- // Check for misplaced attributes before the identifier in an
- // alias-declaration.
- ParsedAttributesWithRange MisplacedAttrs(AttrFactory);
- MaybeParseCXX11Attributes(MisplacedAttrs);
+bool Parser::ParseUsingDeclarator(unsigned Context, UsingDeclarator &D) {
+ D.clear();
// Ignore optional 'typename'.
// FIXME: This is wrong; we should parse this as a typename-specifier.
- if (TryConsumeToken(tok::kw_typename, TypenameLoc))
- HasTypenameKeyword = true;
+ TryConsumeToken(tok::kw_typename, D.TypenameLoc);
if (Tok.is(tok::kw___super)) {
Diag(Tok.getLocation(), diag::err_super_in_using_declaration);
- SkipUntil(tok::semi);
- return nullptr;
+ return true;
}
// Parse nested-name-specifier.
IdentifierInfo *LastII = nullptr;
- ParseOptionalCXXScopeSpecifier(SS, nullptr, /*EnteringContext=*/false,
+ ParseOptionalCXXScopeSpecifier(D.SS, nullptr, /*EnteringContext=*/false,
/*MayBePseudoDtor=*/nullptr,
/*IsTypename=*/false,
/*LastII=*/&LastII);
-
- // Check nested-name specifier.
- if (SS.isInvalid()) {
- SkipUntil(tok::semi);
- return nullptr;
- }
-
- SourceLocation TemplateKWLoc;
- UnqualifiedId Name;
+ if (D.SS.isInvalid())
+ return true;
// Parse the unqualified-id. We allow parsing of both constructor and
// destructor names and allow the action module to diagnose any semantic
@@ -542,32 +560,74 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
// nested-name-specifier, the name is [...] considered to name the
// constructor.
if (getLangOpts().CPlusPlus11 && Context == Declarator::MemberContext &&
- Tok.is(tok::identifier) && NextToken().is(tok::semi) &&
- SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
- !SS.getScopeRep()->getAsNamespace() &&
- !SS.getScopeRep()->getAsNamespaceAlias()) {
+ Tok.is(tok::identifier) &&
+ (NextToken().is(tok::semi) || NextToken().is(tok::comma) ||
+ NextToken().is(tok::ellipsis)) &&
+ D.SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
+ !D.SS.getScopeRep()->getAsNamespace() &&
+ !D.SS.getScopeRep()->getAsNamespaceAlias()) {
SourceLocation IdLoc = ConsumeToken();
- ParsedType Type = Actions.getInheritingConstructorName(SS, IdLoc, *LastII);
- Name.setConstructorName(Type, IdLoc, IdLoc);
- } else if (ParseUnqualifiedId(
- SS, /*EnteringContext=*/false,
- /*AllowDestructorName=*/true,
- /*AllowConstructorName=*/!(Tok.is(tok::identifier) &&
- NextToken().is(tok::equal)),
- nullptr, TemplateKWLoc, Name)) {
- SkipUntil(tok::semi);
- return nullptr;
+ ParsedType Type =
+ Actions.getInheritingConstructorName(D.SS, IdLoc, *LastII);
+ D.Name.setConstructorName(Type, IdLoc, IdLoc);
+ } else {
+ if (ParseUnqualifiedId(
+ D.SS, /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/!(Tok.is(tok::identifier) &&
+ NextToken().is(tok::equal)),
+ nullptr, D.TemplateKWLoc, D.Name))
+ return true;
}
+ if (TryConsumeToken(tok::ellipsis, D.EllipsisLoc))
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus1z ?
+ diag::warn_cxx1z_compat_using_declaration_pack :
+ diag::ext_using_declaration_pack);
+
+ return false;
+}
+
+/// ParseUsingDeclaration - Parse C++ using-declaration or alias-declaration.
+/// Assumes that 'using' was already seen.
+///
+/// using-declaration: [C++ 7.3.p3: namespace.udecl]
+/// 'using' using-declarator-list[opt] ;
+///
+/// using-declarator-list: [C++1z]
+/// using-declarator '...'[opt]
+/// using-declarator-list ',' using-declarator '...'[opt]
+///
+/// using-declarator-list: [C++98-14]
+/// using-declarator
+///
+/// alias-declaration: C++11 [dcl.dcl]p1
+/// 'using' identifier attribute-specifier-seq[opt] = type-id ;
+///
+Parser::DeclGroupPtrTy
+Parser::ParseUsingDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation UsingLoc, SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
+ // Check for misplaced attributes before the identifier in an
+ // alias-declaration.
+ ParsedAttributesWithRange MisplacedAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(MisplacedAttrs);
+
+ UsingDeclarator D;
+ bool InvalidDeclarator = ParseUsingDeclarator(Context, D);
+
ParsedAttributesWithRange Attrs(AttrFactory);
MaybeParseGNUAttributes(Attrs);
MaybeParseCXX11Attributes(Attrs);
// Maybe this is an alias-declaration.
- TypeResult TypeAlias;
- bool IsAliasDecl = Tok.is(tok::equal);
- Decl *DeclFromDeclSpec = nullptr;
- if (IsAliasDecl) {
+ if (Tok.is(tok::equal)) {
+ if (InvalidDeclarator) {
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+
// If we had any misplaced attributes from earlier, this is where they
// should have been written.
if (MisplacedAttrs.Range.isValid()) {
@@ -579,109 +639,156 @@ Decl *Parser::ParseUsingDeclaration(unsigned Context,
Attrs.takeAllFrom(MisplacedAttrs);
}
- ConsumeToken();
+ Decl *DeclFromDeclSpec = nullptr;
+ Decl *AD = ParseAliasDeclarationAfterDeclarator(
+ TemplateInfo, UsingLoc, D, DeclEnd, AS, Attrs, &DeclFromDeclSpec);
+ return Actions.ConvertDeclToDeclGroup(AD, DeclFromDeclSpec);
+ }
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_alias_declaration :
- diag::ext_alias_declaration);
-
- // Type alias templates cannot be specialized.
- int SpecKind = -1;
- if (TemplateInfo.Kind == ParsedTemplateInfo::Template &&
- Name.getKind() == UnqualifiedId::IK_TemplateId)
- SpecKind = 0;
- if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization)
- SpecKind = 1;
- if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
- SpecKind = 2;
- if (SpecKind != -1) {
- SourceRange Range;
- if (SpecKind == 0)
- Range = SourceRange(Name.TemplateId->LAngleLoc,
- Name.TemplateId->RAngleLoc);
- else
- Range = TemplateInfo.getSourceRange();
- Diag(Range.getBegin(), diag::err_alias_declaration_specialization)
- << SpecKind << Range;
- SkipUntil(tok::semi);
- return nullptr;
- }
+ // C++11 attributes are not allowed on a using-declaration, but GNU ones
+ // are.
+ ProhibitAttributes(MisplacedAttrs);
+ ProhibitAttributes(Attrs);
- // Name must be an identifier.
- if (Name.getKind() != UnqualifiedId::IK_Identifier) {
- Diag(Name.StartLocation, diag::err_alias_declaration_not_identifier);
- // No removal fixit: can't recover from this.
- SkipUntil(tok::semi);
- return nullptr;
- } else if (HasTypenameKeyword)
- Diag(TypenameLoc, diag::err_alias_declaration_not_identifier)
- << FixItHint::CreateRemoval(SourceRange(TypenameLoc,
- SS.isNotEmpty() ? SS.getEndLoc() : TypenameLoc));
- else if (SS.isNotEmpty())
- Diag(SS.getBeginLoc(), diag::err_alias_declaration_not_identifier)
- << FixItHint::CreateRemoval(SS.getRange());
+ // Diagnose an attempt to declare a templated using-declaration.
+ // In C++11, alias-declarations can be templates:
+ // template <...> using id = type;
+ if (TemplateInfo.Kind) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_directive_declaration)
+ << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
- TypeAlias = ParseTypeName(nullptr, TemplateInfo.Kind
- ? Declarator::AliasTemplateContext
- : Declarator::AliasDeclContext,
- AS, &DeclFromDeclSpec, &Attrs);
- if (OwnedType)
- *OwnedType = DeclFromDeclSpec;
- } else {
- // C++11 attributes are not allowed on a using-declaration, but GNU ones
- // are.
- ProhibitAttributes(MisplacedAttrs);
- ProhibitAttributes(Attrs);
+ // Unfortunately, we have to bail out instead of recovering by
+ // ignoring the parameters, just in case the nested name specifier
+ // depends on the parameters.
+ return nullptr;
+ }
+ SmallVector<Decl *, 8> DeclsInGroup;
+ while (true) {
// Parse (optional) attributes (most likely GNU strong-using extension).
MaybeParseGNUAttributes(Attrs);
+
+ if (InvalidDeclarator)
+ SkipUntil(tok::comma, tok::semi, StopBeforeMatch);
+ else {
+ // "typename" keyword is allowed for identifiers only,
+ // because it may be a type definition.
+ if (D.TypenameLoc.isValid() &&
+ D.Name.getKind() != UnqualifiedId::IK_Identifier) {
+ Diag(D.Name.getSourceRange().getBegin(),
+ diag::err_typename_identifiers_only)
+ << FixItHint::CreateRemoval(SourceRange(D.TypenameLoc));
+ // Proceed parsing, but discard the typename keyword.
+ D.TypenameLoc = SourceLocation();
+ }
+
+ Decl *UD = Actions.ActOnUsingDeclaration(getCurScope(), AS, UsingLoc,
+ D.TypenameLoc, D.SS, D.Name,
+ D.EllipsisLoc, Attrs.getList());
+ if (UD)
+ DeclsInGroup.push_back(UD);
+ }
+
+ if (!TryConsumeToken(tok::comma))
+ break;
+
+ // Parse another using-declarator.
+ Attrs.clear();
+ InvalidDeclarator = ParseUsingDeclarator(Context, D);
}
+ if (DeclsInGroup.size() > 1)
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus1z ?
+ diag::warn_cxx1z_compat_multi_using_declaration :
+ diag::ext_multi_using_declaration);
+
// Eat ';'.
DeclEnd = Tok.getLocation();
if (ExpectAndConsume(tok::semi, diag::err_expected_after,
!Attrs.empty() ? "attributes list"
- : IsAliasDecl ? "alias declaration"
- : "using declaration"))
+ : "using declaration"))
SkipUntil(tok::semi);
- // Diagnose an attempt to declare a templated using-declaration.
- // In C++11, alias-declarations can be templates:
- // template <...> using id = type;
- if (TemplateInfo.Kind && !IsAliasDecl) {
- SourceRange R = TemplateInfo.getSourceRange();
- Diag(UsingLoc, diag::err_templated_using_directive_declaration)
- << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
+ return Actions.BuildDeclaratorGroup(DeclsInGroup, /*MayContainAuto*/false);
+}
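The C++1z forms the rewritten ParseUsingDeclaration now accepts, sketched with illustrative types:

    struct A { void f(int); };
    struct B { void f(double); };
    struct C : A, B {
      using A::f, B::f;          // using-declarator-list (ext_multi_using_declaration
    };                           // before C++1z)
    template <typename... Ts>
    struct Overloaded : Ts... {
      using Ts::operator()...;   // pack form (ext_using_declaration_pack /
    };                           // warn_cxx1z_compat_using_declaration_pack)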
- // Unfortunately, we have to bail out instead of recovering by
- // ignoring the parameters, just in case the nested name specifier
- // depends on the parameters.
+Decl *Parser::ParseAliasDeclarationAfterDeclarator(
+ const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
+ UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
+ ParsedAttributes &Attrs, Decl **OwnedType) {
+ if (ExpectAndConsume(tok::equal)) {
+ SkipUntil(tok::semi);
return nullptr;
}
- // "typename" keyword is allowed for identifiers only,
- // because it may be a type definition.
- if (HasTypenameKeyword && Name.getKind() != UnqualifiedId::IK_Identifier) {
- Diag(Name.getSourceRange().getBegin(), diag::err_typename_identifiers_only)
- << FixItHint::CreateRemoval(SourceRange(TypenameLoc));
- // Proceed parsing, but reset the HasTypenameKeyword flag.
- HasTypenameKeyword = false;
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus11 ?
+ diag::warn_cxx98_compat_alias_declaration :
+ diag::ext_alias_declaration);
+
+ // Type alias templates cannot be specialized.
+ int SpecKind = -1;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::Template &&
+ D.Name.getKind() == UnqualifiedId::IK_TemplateId)
+ SpecKind = 0;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization)
+ SpecKind = 1;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ SpecKind = 2;
+ if (SpecKind != -1) {
+ SourceRange Range;
+ if (SpecKind == 0)
+ Range = SourceRange(D.Name.TemplateId->LAngleLoc,
+ D.Name.TemplateId->RAngleLoc);
+ else
+ Range = TemplateInfo.getSourceRange();
+ Diag(Range.getBegin(), diag::err_alias_declaration_specialization)
+ << SpecKind << Range;
+ SkipUntil(tok::semi);
+ return nullptr;
}
- if (IsAliasDecl) {
- TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
- MultiTemplateParamsArg TemplateParamsArg(
- TemplateParams ? TemplateParams->data() : nullptr,
- TemplateParams ? TemplateParams->size() : 0);
- return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
- UsingLoc, Name, Attrs.getList(),
- TypeAlias, DeclFromDeclSpec);
- }
+ // Name must be an identifier.
+ if (D.Name.getKind() != UnqualifiedId::IK_Identifier) {
+ Diag(D.Name.StartLocation, diag::err_alias_declaration_not_identifier);
+ // No removal fixit: can't recover from this.
+ SkipUntil(tok::semi);
+ return nullptr;
+ } else if (D.TypenameLoc.isValid())
+ Diag(D.TypenameLoc, diag::err_alias_declaration_not_identifier)
+ << FixItHint::CreateRemoval(SourceRange(
+ D.TypenameLoc,
+ D.SS.isNotEmpty() ? D.SS.getEndLoc() : D.TypenameLoc));
+ else if (D.SS.isNotEmpty())
+ Diag(D.SS.getBeginLoc(), diag::err_alias_declaration_not_identifier)
+ << FixItHint::CreateRemoval(D.SS.getRange());
+ if (D.EllipsisLoc.isValid())
+ Diag(D.EllipsisLoc, diag::err_alias_declaration_pack_expansion)
+ << FixItHint::CreateRemoval(SourceRange(D.EllipsisLoc));
+
+ Decl *DeclFromDeclSpec = nullptr;
+ TypeResult TypeAlias =
+ ParseTypeName(nullptr,
+ TemplateInfo.Kind ? Declarator::AliasTemplateContext
+ : Declarator::AliasDeclContext,
+ AS, &DeclFromDeclSpec, &Attrs);
+ if (OwnedType)
+ *OwnedType = DeclFromDeclSpec;
- return Actions.ActOnUsingDeclaration(getCurScope(), AS,
- /* HasUsingKeyword */ true, UsingLoc,
- SS, Name, Attrs.getList(),
- HasTypenameKeyword, TypenameLoc);
+ // Eat ';'.
+ DeclEnd = Tok.getLocation();
+ if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+ !Attrs.empty() ? "attributes list"
+ : "alias declaration"))
+ SkipUntil(tok::semi);
+
+ TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
+ MultiTemplateParamsArg TemplateParamsArg(
+ TemplateParams ? TemplateParams->data() : nullptr,
+ TemplateParams ? TemplateParams->size() : 0);
+ return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
+ UsingLoc, D.Name, Attrs.getList(),
+ TypeAlias, DeclFromDeclSpec);
}
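For completeness, the alias-declaration path split out into ParseAliasDeclarationAfterDeclarator covers declarations such as (illustrative):

    #include <vector>
    using Callback = void (*)(int);                    // alias-declaration
    template <typename T> using Vec = std::vector<T>;  // alias template
    // using ::X = int;    // err_alias_declaration_not_identifier
    // using X... = int;   // err_alias_declaration_pack_expansion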
/// ParseStaticAssertDeclaration - Parse C++0x or C11 static_assert-declaration.
@@ -1742,7 +1849,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TParams =
MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
- handleDeclspecAlignBeforeClassKey(attrs, DS, TUK);
+ stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
// Declaration or definition of a class type
TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc,
@@ -1995,7 +2102,8 @@ void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
LateMethod->DefaultArgs.reserve(FTI.NumParams);
for (unsigned ParamIdx = 0; ParamIdx < FTI.NumParams; ++ParamIdx)
LateMethod->DefaultArgs.push_back(LateParsedDefaultArgument(
- FTI.Params[ParamIdx].Param, FTI.Params[ParamIdx].DefaultArgTokens));
+ FTI.Params[ParamIdx].Param,
+ std::move(FTI.Params[ParamIdx].DefaultArgTokens)));
}
}
@@ -2005,6 +2113,7 @@ void Parser::HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
/// virt-specifier:
/// override
/// final
+/// __final
VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
if (!getLangOpts().CPlusPlus || Tok.isNot(tok::identifier))
return VirtSpecifiers::VS_None;
@@ -2014,6 +2123,8 @@ VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
// Initialize the contextual keywords.
if (!Ident_final) {
Ident_final = &PP.getIdentifierTable().get("final");
+ if (getLangOpts().GNUKeywords)
+ Ident_GNU_final = &PP.getIdentifierTable().get("__final");
if (getLangOpts().MicrosoftExt)
Ident_sealed = &PP.getIdentifierTable().get("sealed");
Ident_override = &PP.getIdentifierTable().get("override");
@@ -2028,6 +2139,9 @@ VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
if (II == Ident_final)
return VirtSpecifiers::VS_Final;
+ if (II == Ident_GNU_final)
+ return VirtSpecifiers::VS_GNU_Final;
+
return VirtSpecifiers::VS_None;
}
@@ -2067,6 +2181,8 @@ void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
<< VirtSpecifiers::getSpecifierName(Specifier);
} else if (Specifier == VirtSpecifiers::VS_Sealed) {
Diag(Tok.getLocation(), diag::ext_ms_sealed_keyword);
+ } else if (Specifier == VirtSpecifiers::VS_GNU_Final) {
+ Diag(Tok.getLocation(), diag::ext_warn_gnu_final);
} else {
Diag(Tok.getLocation(),
getLangOpts().CPlusPlus11
@@ -2083,6 +2199,7 @@ void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
bool Parser::isCXX11FinalKeyword() const {
VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier();
return Specifier == VirtSpecifiers::VS_Final ||
+ Specifier == VirtSpecifiers::VS_GNU_Final ||
Specifier == VirtSpecifiers::VS_Sealed;
}
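The GNU '__final' spelling wired up here behaves like 'final' but draws ext_warn_gnu_final; a sketch (GNU-keywords mode assumed):

    struct Base { virtual void f(); };
    struct Derived __final : Base {  // class-level __final, same effect as 'final'
      void f() __final;              // member-level __final
    };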
@@ -2181,7 +2298,7 @@ void Parser::MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
if (!(Function.TypeQuals & TypeQual)) {
std::string Name(FixItName);
Name += " ";
- Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name.c_str());
+ Insertion = FixItHint::CreateInsertion(VS.getFirstLocation(), Name);
Function.TypeQuals |= TypeQual;
*QualifierLoc = SpecLoc.getRawEncoding();
}
@@ -2322,10 +2439,9 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
return DeclGroupPtrTy::make(DeclGroupRef(Actions.ActOnUsingDeclaration(
- getCurScope(), AS,
- /* HasUsingKeyword */ false, SourceLocation(), SS, Name,
- /* AttrList */ nullptr,
- /* HasTypenameKeyword */ false, SourceLocation())));
+ getCurScope(), AS, /*UsingLoc*/ SourceLocation(),
+ /*TypenameLoc*/ SourceLocation(), SS, Name,
+ /*EllipsisLoc*/ SourceLocation(), /*AttrList*/ nullptr)));
}
}
@@ -2380,8 +2496,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
}
SourceLocation DeclEnd;
// Otherwise, it must be a using-declaration or an alias-declaration.
- return DeclGroupPtrTy::make(DeclGroupRef(ParseUsingDeclaration(
- Declarator::MemberContext, TemplateInfo, UsingLoc, DeclEnd, AS)));
+ return ParseUsingDeclaration(Declarator::MemberContext, TemplateInfo,
+ UsingLoc, DeclEnd, AS);
}
// Hold late-parsed attributes so we can attach a Decl to them later.
@@ -2996,6 +3112,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (getLangOpts().CPlusPlus && Tok.is(tok::identifier)) {
VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier(Tok);
assert((Specifier == VirtSpecifiers::VS_Final ||
+ Specifier == VirtSpecifiers::VS_GNU_Final ||
Specifier == VirtSpecifiers::VS_Sealed) &&
"not a class definition");
FinalLoc = ConsumeToken();
@@ -3011,6 +3128,8 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
<< VirtSpecifiers::getSpecifierName(Specifier);
else if (Specifier == VirtSpecifiers::VS_Sealed)
Diag(FinalLoc, diag::ext_ms_sealed_keyword);
+ else if (Specifier == VirtSpecifiers::VS_GNU_Final)
+ Diag(FinalLoc, diag::ext_warn_gnu_final);
// Parse any C++11 attributes after 'final' keyword.
// These attributes are not allowed to appear here,
@@ -3456,7 +3575,11 @@ static void diagnoseDynamicExceptionSpecification(
Parser &P, SourceRange Range, bool IsNoexcept) {
if (P.getLangOpts().CPlusPlus11) {
const char *Replacement = IsNoexcept ? "noexcept" : "noexcept(false)";
- P.Diag(Range.getBegin(), diag::warn_exception_spec_deprecated) << Range;
+ P.Diag(Range.getBegin(),
+ P.getLangOpts().CPlusPlus1z && !IsNoexcept
+ ? diag::ext_dynamic_exception_spec
+ : diag::warn_exception_spec_deprecated)
+ << Range;
P.Diag(Range.getBegin(), diag::note_exception_spec_deprecated)
<< Replacement << FixItHint::CreateReplacement(Range, Replacement);
}
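Concretely, C++1z removed the dynamic exception-specification from the language, so it now draws an error-grade extension diagnostic rather than a deprecation warning:

    void f() throw(int);  // C++1z: ext_dynamic_exception_spec
    void g() throw();     // equivalent to noexcept, so still only
                          // warn_exception_spec_deprecated; the note's
                          // fix-it suggests 'noexcept'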
@@ -3655,8 +3778,8 @@ static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
return true;
case AttributeList::AT_WarnUnusedResult:
return !ScopeName && AttrName->getName().equals("nodiscard");
- case AttributeList::AT_Unused:
- return !ScopeName && AttrName->getName().equals("maybe_unused");
+ case AttributeList::AT_Unused:
+ return !ScopeName && AttrName->getName().equals("maybe_unused");
default:
return false;
}
@@ -3913,6 +4036,93 @@ SourceLocation Parser::SkipCXX11Attributes() {
return EndLoc;
}
+/// Parse uuid() attribute when it appears in a [] Microsoft attribute.
+void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
+ assert(Tok.is(tok::identifier) && "Not a Microsoft attribute list");
+ IdentifierInfo *UuidIdent = Tok.getIdentifierInfo();
+ assert(UuidIdent->getName() == "uuid" && "Not a Microsoft attribute list");
+
+ SourceLocation UuidLoc = Tok.getLocation();
+ ConsumeToken();
+
+ // Ignore the left paren location for now.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ ArgsVector ArgExprs;
+ if (Tok.is(tok::string_literal)) {
+ // Easy case: uuid("...") -- quoted string.
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return;
+ ArgExprs.push_back(StringResult.get());
+ } else {
+ // Something like uuid({000000A0-0000-0000-C000-000000000049}) -- no
+ // quotes in the parens. Just append the spelling of all tokens encountered
+ // until the closing paren.
+
+ SmallString<42> StrBuffer; // 2 "", 36 bytes UUID, 2 optional {}, 1 nul
+ StrBuffer += "\"";
+
+ // Since none of C++'s keywords match [a-f]+, accepting just tok::l_brace,
+ // tok::r_brace, tok::minus, tok::identifier (think C000) and
+ // tok::numeric_constant (0000) should be enough. But the spelling of the
+ // uuid argument is checked later anyway, so there's no harm in accepting
+ // almost anything here.
+ // cl is very strict about whitespace in this form and errors out if any
+ // is present, so check the space flags on the tokens.
+ SourceLocation StartLoc = Tok.getLocation();
+ while (Tok.isNot(tok::r_paren)) {
+ if (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()) {
+ Diag(Tok, diag::err_attribute_uuid_malformed_guid);
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ SmallString<16> SpellingBuffer;
+ SpellingBuffer.resize(Tok.getLength() + 1);
+ bool Invalid = false;
+ StringRef TokSpelling = PP.getSpelling(Tok, SpellingBuffer, &Invalid);
+ if (Invalid) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return;
+ }
+ StrBuffer += TokSpelling;
+ ConsumeAnyToken();
+ }
+ StrBuffer += "\"";
+
+ if (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()) {
+ Diag(Tok, diag::err_attribute_uuid_malformed_guid);
+ ConsumeParen();
+ return;
+ }
+
+ // Pretend the user wrote the appropriate string literal here.
+ // ActOnStringLiteral() copies the string data into the literal, so it's
+ // ok that the Token points to StrBuffer.
+ Token Toks[1];
+ Toks[0].startToken();
+ Toks[0].setKind(tok::string_literal);
+ Toks[0].setLocation(StartLoc);
+ Toks[0].setLiteralData(StrBuffer.data());
+ Toks[0].setLength(StrBuffer.size());
+ StringLiteral *UuidString =
+ cast<StringLiteral>(Actions.ActOnStringLiteral(Toks, nullptr).get());
+ ArgExprs.push_back(UuidString);
+ }
+
+ if (!T.consumeClose()) {
+ // FIXME: Warn that this syntax is deprecated, with a Fix-It suggesting
+ // using __declspec(uuid()) instead.
+ Attrs.addNew(UuidIdent, SourceRange(UuidLoc, T.getCloseLocation()), nullptr,
+ SourceLocation(), ArgExprs.data(), ArgExprs.size(),
+ AttributeList::AS_Microsoft);
+ }
+}
+
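The two argument spellings accepted above, for reference (GUIDs illustrative; -fms-extensions assumed):

    [uuid("00000000-0000-0000-C000-000000000046")] struct IQuoted;
    [uuid({00000000-0000-0000-C000-000000000046})] struct IBare;  // cl-style, no quotes
    // whitespace inside the bare form: err_attribute_uuid_malformed_guid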
/// ParseMicrosoftAttributes - Parse Microsoft attributes [Attr]
///
/// [MS] ms-attribute:
@@ -3929,7 +4139,18 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
// FIXME: If this is actually a C++11 attribute, parse it as one.
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
- SkipUntil(tok::r_square, StopAtSemi | StopBeforeMatch);
+
+ // Skip most MS attributes, except for a whitelist.
+ while (true) {
+ SkipUntil(tok::r_square, tok::identifier, StopAtSemi | StopBeforeMatch);
+ if (Tok.isNot(tok::identifier)) // ']', but also eof
+ break;
+ if (Tok.getIdentifierInfo()->getName() == "uuid")
+ ParseMicrosoftUuidAttributeArgs(attrs);
+ else
+ ConsumeToken();
+ }
+
T.consumeClose();
if (endLoc)
*endLoc = T.getCloseLocation();
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index 3e87a73aafe8..caf2320f8fc1 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -21,15 +21,14 @@
///
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "RAIIObjectsForParser.h"
#include "clang/AST/ASTContext.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Parse/Parser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
@@ -886,7 +885,13 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// Allow the base to be 'super' if in an objc-method.
(&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
ConsumeToken();
-
+
+ if (Tok.is(tok::code_completion) && &II != Ident_super) {
+ Actions.CodeCompleteObjCClassPropertyRefExpr(
+ getCurScope(), II, ILoc, ExprStatementTokLoc == ILoc);
+ cutOffParsing();
+ return ExprError();
+ }
// Allow either an identifier or the keyword 'class' (in C++).
if (Tok.isNot(tok::identifier) &&
!(getLangOpts().CPlusPlus && Tok.is(tok::kw_class))) {
@@ -1647,9 +1652,10 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (Tok.is(tok::code_completion)) {
// Code completion for a member access expression.
- Actions.CodeCompleteMemberReferenceExpr(getCurScope(), LHS.get(),
- OpLoc, OpKind == tok::arrow);
-
+ Actions.CodeCompleteMemberReferenceExpr(
+ getCurScope(), LHS.get(), OpLoc, OpKind == tok::arrow,
+ ExprStatementTokLoc == LHS.get()->getLocStart());
+
cutOffParsing();
return ExprError();
}
@@ -2836,6 +2842,7 @@ ExprResult Parser::ParseBlockLiteralExpression() {
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
+ /*DeclsInPrototype=*/None,
CaretLoc, CaretLoc,
ParamInfo),
attrs, CaretLoc);
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 85c1301fc967..ca1b3b1ad01b 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -100,48 +100,6 @@ void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
/*AtDigraph*/false);
}
-/// \brief Emits an error for a left parentheses after a double colon.
-///
-/// When a '(' is found after a '::', emit an error. Attempt to fix the token
-/// stream by removing the '(', and the matching ')' if found.
-void Parser::CheckForLParenAfterColonColon() {
- if (!Tok.is(tok::l_paren))
- return;
-
- Token LParen = Tok;
- Token NextTok = GetLookAheadToken(1);
- Token StarTok = NextTok;
- // Check for (identifier or (*identifier
- Token IdentifierTok = StarTok.is(tok::star) ? GetLookAheadToken(2) : StarTok;
- if (IdentifierTok.isNot(tok::identifier))
- return;
- // Eat the '('.
- ConsumeParen();
- Token RParen;
- RParen.setLocation(SourceLocation());
- // Do we have a ')' ?
- NextTok = StarTok.is(tok::star) ? GetLookAheadToken(2) : GetLookAheadToken(1);
- if (NextTok.is(tok::r_paren)) {
- RParen = NextTok;
- // Eat the '*' if it is present.
- if (StarTok.is(tok::star))
- ConsumeToken();
- // Eat the identifier.
- ConsumeToken();
- // Add the identifier token back.
- PP.EnterToken(IdentifierTok);
- // Add the '*' back if it was present.
- if (StarTok.is(tok::star))
- PP.EnterToken(StarTok);
- // Eat the ')'.
- ConsumeParen();
- }
-
- Diag(LParen.getLocation(), diag::err_paren_after_colon_colon)
- << FixItHint::CreateRemoval(LParen.getLocation())
- << FixItHint::CreateRemoval(RParen.getLocation());
-}
-
/// \brief Parse global scope or nested-name-specifier if present.
///
/// Parses a C++ global scope specifier ('::') or nested-name-specifier (which
@@ -237,8 +195,6 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (Actions.ActOnCXXGlobalScopeSpecifier(ConsumeToken(), SS))
return true;
- CheckForLParenAfterColonColon();
-
HasScopeSpecifier = true;
}
}
@@ -427,13 +383,13 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// namespace-name '::'
// nested-name-specifier identifier '::'
Token Next = NextToken();
-
+ Sema::NestedNameSpecInfo IdInfo(&II, Tok.getLocation(), Next.getLocation(),
+ ObjectType);
+
// If we get foo:bar, this is almost certainly a typo for foo::bar. Recover
// and emit a fixit hint for it.
if (Next.is(tok::colon) && !ColonIsSacred) {
- if (Actions.IsInvalidUnlessNestedName(getCurScope(), SS, II,
- Tok.getLocation(),
- Next.getLocation(), ObjectType,
+ if (Actions.IsInvalidUnlessNestedName(getCurScope(), SS, IdInfo,
EnteringContext) &&
// If the token after the colon isn't an identifier, it's still an
// error, but they probably meant something else strange so don't
@@ -459,8 +415,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
if (Next.is(tok::coloncolon)) {
if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
- !Actions.isNonTypeNestedNameSpecifier(
- getCurScope(), SS, Tok.getLocation(), II, ObjectType)) {
+ !Actions.isNonTypeNestedNameSpecifier(getCurScope(), SS, IdInfo)) {
*MayBePseudoDestructor = true;
return false;
}
@@ -492,12 +447,10 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
Token ColonColon = Tok;
SourceLocation CCLoc = ConsumeToken();
- CheckForLParenAfterColonColon();
-
bool IsCorrectedToColon = false;
bool *CorrectionFlagPtr = ColonIsSacred ? &IsCorrectedToColon : nullptr;
- if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(), II, IdLoc, CCLoc,
- ObjectType, EnteringContext, SS,
+ if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(), IdInfo,
+ EnteringContext, SS,
false, CorrectionFlagPtr)) {
// Identifier is not recognized as a nested name, but we can have
// mistyped '::' instead of ':'.
@@ -949,6 +902,8 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
SourceLocation StartLoc = Tok.getLocation();
InMessageExpressionRAIIObject MaybeInMessageExpression(*this, true);
Init = ParseInitializer();
+ if (!Init.isInvalid())
+ Init = Actions.CorrectDelayedTyposInExpr(Init.get());
if (Tok.getLocation() != StartLoc) {
// Back out the lexing of the token after the initializer.
@@ -1003,6 +958,7 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// return y;
// }
// };
+ // }
// If x was not const, the second use would require 'L' to capture, and
// that would be an error.
@@ -1053,6 +1009,58 @@ bool Parser::TryParseLambdaIntroducer(LambdaIntroducer &Intro) {
return false;
}
+static void
+tryConsumeMutableOrConstexprToken(Parser &P, SourceLocation &MutableLoc,
+ SourceLocation &ConstexprLoc,
+ SourceLocation &DeclEndLoc) {
+ assert(MutableLoc.isInvalid());
+ assert(ConstexprLoc.isInvalid());
+ // Consume constexpr-opt and mutable-opt in any order, and set DeclEndLoc
+ // to the last of those locations. Emit an error and recover if we see
+ // multiple copies of either keyword.
+
+ while (true) {
+ switch (P.getCurToken().getKind()) {
+ case tok::kw_mutable: {
+ if (MutableLoc.isValid()) {
+ P.Diag(P.getCurToken().getLocation(),
+ diag::err_lambda_decl_specifier_repeated)
+ << 0 << FixItHint::CreateRemoval(P.getCurToken().getLocation());
+ }
+ MutableLoc = P.ConsumeToken();
+ DeclEndLoc = MutableLoc;
+ break /*switch*/;
+ }
+ case tok::kw_constexpr:
+ if (ConstexprLoc.isValid()) {
+ P.Diag(P.getCurToken().getLocation(),
+ diag::err_lambda_decl_specifier_repeated)
+ << 1 << FixItHint::CreateRemoval(P.getCurToken().getLocation());
+ }
+ ConstexprLoc = P.ConsumeToken();
+ DeclEndLoc = ConstexprLoc;
+ break /*switch*/;
+ default:
+ return;
+ }
+ }
+}
+
+static void
+addConstexprToLambdaDeclSpecifier(Parser &P, SourceLocation ConstexprLoc,
+ DeclSpec &DS) {
+ if (ConstexprLoc.isValid()) {
+ P.Diag(ConstexprLoc, !P.getLangOpts().CPlusPlus1z
+ ? diag::ext_constexpr_on_lambda_cxx1z
+ : diag::warn_cxx14_compat_constexpr_on_lambda);
+ const char *PrevSpec = nullptr;
+ unsigned DiagID = 0;
+ DS.SetConstexprSpec(ConstexprLoc, PrevSpec, DiagID);
+ assert(PrevSpec == nullptr && DiagID == 0 &&
+ "Constexpr cannot have been set previously!");
+ }
+}
+
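Together these helpers let a lambda-declarator carry 'mutable' and C++1z 'constexpr' in either order; a sketch of what now parses:

    constexpr auto sq = [](int n) constexpr { return n * n; };
    // (ext_constexpr_on_lambda_cxx1z before C++1z)
    static_assert(sq(3) == 9, "usable in constant expressions");
    auto tick = [i = 0]() mutable constexpr { return ++i; };  // either keyword order
    // [](int n) constexpr constexpr { }  -> err_lambda_decl_specifier_repeated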
/// ParseLambdaExpressionAfterIntroducer - Parse the rest of a lambda
/// expression.
ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
@@ -1072,7 +1080,27 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DeclSpec DS(AttrFactory);
Declarator D(DS, Declarator::LambdaExprContext);
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- Actions.PushLambdaScope();
+ Actions.PushLambdaScope();
+
+ ParsedAttributes Attr(AttrFactory);
+ SourceLocation DeclLoc = Tok.getLocation();
+ if (getLangOpts().CUDA) {
+ // In CUDA code, GNU attributes are allowed to appear immediately after the
+ // "[...]", even if there is no "(...)" before the lambda body.
+ MaybeParseGNUAttributes(D);
+ }
+
+ // Helper to emit a warning if we see a CUDA host/device/global attribute
+ // after '(...)'. nvcc doesn't accept this.
+ auto WarnIfHasCUDATargetAttr = [&] {
+ if (getLangOpts().CUDA)
+ for (auto *A = Attr.getList(); A != nullptr; A = A->getNext())
+ if (A->getKind() == AttributeList::AT_CUDADevice ||
+ A->getKind() == AttributeList::AT_CUDAHost ||
+ A->getKind() == AttributeList::AT_CUDAGlobal)
+ Diag(A->getLoc(), diag::warn_cuda_attr_lambda_position)
+ << A->getName()->getName();
+ };
TypeResult TrailingReturnType;
if (Tok.is(tok::l_paren)) {
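The CUDA-specific placement handled here, sketched (CUDA mode assumed; nvcc accepts only the first form):

    auto k1 = [] __device__ () { return 1; };  // right after the introducer: OK
    auto k2 = [] () __device__ { return 2; };  // after '(...)': warn_cuda_attr_lambda_position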
@@ -1081,13 +1109,11 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Scope::FunctionDeclarationScope |
Scope::DeclScope);
- SourceLocation DeclEndLoc;
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
SourceLocation LParenLoc = T.getOpenLocation();
// Parse parameter-declaration-clause.
- ParsedAttributes Attr(AttrFactory);
SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
SourceLocation EllipsisLoc;
@@ -1101,7 +1127,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
T.consumeClose();
SourceLocation RParenLoc = T.getCloseLocation();
- DeclEndLoc = RParenLoc;
+ SourceLocation DeclEndLoc = RParenLoc;
// GNU-style attributes must be parsed before the mutable specifier to be
// compatible with GCC.
@@ -1111,10 +1137,13 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// compatible with MSVC.
MaybeParseMicrosoftDeclSpecs(Attr, &DeclEndLoc);
- // Parse 'mutable'[opt].
+ // Parse mutable-opt and/or constexpr-opt, and update the DeclEndLoc.
SourceLocation MutableLoc;
- if (TryConsumeToken(tok::kw_mutable, MutableLoc))
- DeclEndLoc = MutableLoc;
+ SourceLocation ConstexprLoc;
+ tryConsumeMutableOrConstexprToken(*this, MutableLoc, ConstexprLoc,
+ DeclEndLoc);
+
+ addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
// Parse exception-specification[opt].
ExceptionSpecificationType ESpecType = EST_None;
@@ -1149,6 +1178,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
PrototypeScope.Exit();
+ WarnIfHasCUDATargetAttr();
+
SourceLocation NoLoc;
D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
/*isAmbiguous=*/false,
@@ -1169,10 +1200,12 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
NoexceptExpr.isUsable() ?
NoexceptExpr.get() : nullptr,
/*ExceptionSpecTokens*/nullptr,
+ /*DeclsInPrototype=*/None,
LParenLoc, FunLocalRangeEnd, D,
TrailingReturnType),
Attr, DeclEndLoc);
- } else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute) ||
+ } else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
+ tok::kw_constexpr) ||
(Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
// It's common to forget that one needs '()' before 'mutable', an attribute
// specifier, or the result type. Deal with this.
@@ -1182,18 +1215,17 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
case tok::arrow: TokKind = 1; break;
case tok::kw___attribute:
case tok::l_square: TokKind = 2; break;
+ case tok::kw_constexpr: TokKind = 3; break;
default: llvm_unreachable("Unknown token kind");
}
Diag(Tok, diag::err_lambda_missing_parens)
<< TokKind
<< FixItHint::CreateInsertion(Tok.getLocation(), "() ");
- SourceLocation DeclLoc = Tok.getLocation();
SourceLocation DeclEndLoc = DeclLoc;
// GNU-style attributes must be parsed before the mutable specifier to be
// compatible with GCC.
- ParsedAttributes Attr(AttrFactory);
MaybeParseGNUAttributes(Attr, &DeclEndLoc);
// Parse 'mutable', if it's there.
@@ -1214,6 +1246,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DeclEndLoc = Range.getEnd();
}
+ WarnIfHasCUDATargetAttr();
+
SourceLocation NoLoc;
D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
/*isAmbiguous=*/false,
@@ -1236,11 +1270,11 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
+ /*DeclsInPrototype=*/None,
DeclLoc, DeclEndLoc, D,
TrailingReturnType),
Attr, DeclEndLoc);
}
-
// FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
// it.
@@ -1711,6 +1745,10 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
}
case ConditionOrInitStatement::InitStmtDecl: {
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus1z
+ ? diag::warn_cxx14_compat_init_statement
+ : diag::ext_init_statement)
+ << (CK == Sema::ConditionKind::Switch);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy DG = ParseSimpleDeclaration(
Declarator::InitStmtContext, DeclEnd, attrs, /*RequireSemi=*/true);
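The C++1z init-statement being diagnosed here (a compat warning in C++1z, an extension otherwise); names illustrative:

    if (auto it = table.find(key); it != table.end())  // if with init-statement
      use(it->second);
    switch (int rc = next_event(); rc) {               // switch with init-statement
    case 0: break;
    default: handle(rc);
    }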
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
index 2cdb9d3a22a6..4a68942f6d2c 100644
--- a/lib/Parse/ParseInit.cpp
+++ b/lib/Parse/ParseInit.cpp
@@ -11,13 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "RAIIObjectsForParser.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Sema/Designator.h"
#include "clang/Sema/Scope.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/raw_ostream.h"
using namespace clang;
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index 67abe5839bfe..81761bf8d2d8 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -344,9 +344,11 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
protocols, protocolLocs, EndProtoLoc,
/*consumeLastToken=*/true,
/*warnOnIncompleteProtocols=*/true);
+ if (Tok.is(tok::eof))
+ return nullptr;
}
}
-
+
// Next, we need to check for any protocol references.
if (LAngleLoc.isValid()) {
if (!ProtocolIdents.empty()) {
@@ -367,7 +369,8 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
}
if (Tok.isNot(tok::less))
- Actions.ActOnTypedefedProtocols(protocols, superClassId, superClassLoc);
+ Actions.ActOnTypedefedProtocols(protocols, protocolLocs,
+ superClassId, superClassLoc);
Decl *ClsType =
Actions.ActOnStartClassInterface(getCurScope(), AtLoc, nameId, nameLoc,
@@ -1034,7 +1037,7 @@ IdentifierInfo *Parser::ParseObjCSelectorPiece(SourceLocation &SelectorLoc) {
case tok::caretequal: {
std::string ThisTok(PP.getSpelling(Tok));
if (isLetter(ThisTok[0])) {
- IdentifierInfo *II = &PP.getIdentifierTable().get(ThisTok.data());
+ IdentifierInfo *II = &PP.getIdentifierTable().get(ThisTok);
Tok.setKind(tok::identifier);
SelectorLoc = ConsumeToken();
return II;
@@ -1814,6 +1817,8 @@ void Parser::parseObjCTypeArgsAndProtocolQualifiers(
protocolRAngleLoc,
consumeLastToken,
/*warnOnIncompleteProtocols=*/false);
+ if (Tok.is(tok::eof)) // Nothing else to do here...
+ return;
// An Objective-C object pointer followed by type arguments
// can then be followed again by a set of protocol references, e.g.,
@@ -1862,6 +1867,9 @@ TypeResult Parser::parseObjCTypeArgsAndProtocolQualifiers(
protocols, protocolLocs,
protocolRAngleLoc, consumeLastToken);
+ if (Tok.is(tok::eof))
+ return true; // Invalid type result.
+
// Compute the location of the last token.
if (consumeLastToken)
endLoc = PrevTokLocation;
@@ -2238,7 +2246,6 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
while (!ObjCImplParsing.isFinished() && !isEofOrEom()) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftAttributes(attrs);
if (DeclGroupPtrTy DGP = ParseExternalDeclaration(attrs)) {
DeclGroupRef DG = DGP.get();
DeclsInGroup.append(DG.begin(), DG.end());
@@ -2766,6 +2773,7 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
return Actions.ActOnNullStmt(Tok.getLocation());
}
+ ExprStatementTokLoc = AtLoc;
ExprResult Res(ParseExpressionWithLeadingAt(AtLoc));
if (Res.isInvalid()) {
// If the expression is invalid, skip ahead to the next semicolon. Not
@@ -2862,7 +2870,11 @@ ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
return ParseAvailabilityCheckExpr(AtLoc);
default: {
const char *str = nullptr;
- if (GetLookAheadToken(1).is(tok::l_brace)) {
+ // Only provide the @try/@finally/@autoreleasepool fixit when we're sure
+ // that this is a proper statement where such directives could actually
+ // occur.
+ if (GetLookAheadToken(1).is(tok::l_brace) &&
+ ExprStatementTokLoc == AtLoc) {
char ch = Tok.getIdentifierInfo()->getNameStart()[0];
str =
ch == 't' ? "try"
@@ -3416,6 +3428,7 @@ ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
ExprVector ElementExprs; // array elements.
ConsumeBracket(); // consume the l_square.
+ bool HasInvalidEltExpr = false;
while (Tok.isNot(tok::r_square)) {
// Parse list of array element expressions (all must be id types).
ExprResult Res(ParseAssignmentExpression());
@@ -3427,11 +3440,15 @@ ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
return Res;
}
+ Res = Actions.CorrectDelayedTyposInExpr(Res.get());
+ if (Res.isInvalid())
+ HasInvalidEltExpr = true;
+
// Parse the ellipsis that indicates a pack expansion.
if (Tok.is(tok::ellipsis))
Res = Actions.ActOnPackExpansion(Res.get(), ConsumeToken());
if (Res.isInvalid())
- return true;
+ HasInvalidEltExpr = true;
ElementExprs.push_back(Res.get());
@@ -3442,6 +3459,10 @@ ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
<< tok::comma);
}
SourceLocation EndLoc = ConsumeBracket(); // location of ']'
+
+ if (HasInvalidEltExpr)
+ return ExprError();
+
MultiExprArg Args(ElementExprs);
return Actions.BuildObjCArrayLiteral(SourceRange(AtLoc, EndLoc), Args);
}
@@ -3449,6 +3470,7 @@ ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
SmallVector<ObjCDictionaryElement, 4> Elements; // dictionary elements.
ConsumeBrace(); // consume the l_brace.
+ bool HasInvalidEltExpr = false;
while (Tok.isNot(tok::r_brace)) {
// Parse the comma separated key : value expressions.
ExprResult KeyExpr;
@@ -3478,7 +3500,15 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
return ValueExpr;
}
- // Parse the ellipsis that designates this as a pack expansion.
+ // Check the key and value for possible typos.
+ KeyExpr = Actions.CorrectDelayedTyposInExpr(KeyExpr.get());
+ ValueExpr = Actions.CorrectDelayedTyposInExpr(ValueExpr.get());
+ if (KeyExpr.isInvalid() || ValueExpr.isInvalid())
+ HasInvalidEltExpr = true;
+
+ // Parse the ellipsis that designates this as a pack expansion. Do not call
+ // ActOnPackExpansion here; leave that to template instantiation time, where
+ // we can produce better diagnostics.
SourceLocation EllipsisLoc;
if (getLangOpts().CPlusPlus)
TryConsumeToken(tok::ellipsis, EllipsisLoc);
@@ -3495,6 +3525,9 @@ ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
<< tok::comma);
}
SourceLocation EndLoc = ConsumeBrace();
+
+ if (HasInvalidEltExpr)
+ return ExprError();
// Create the ObjCDictionaryLiteral.
return Actions.BuildObjCDictionaryLiteral(SourceRange(AtLoc, EndLoc),
diff --git a/lib/Parse/ParseOpenMP.cpp b/lib/Parse/ParseOpenMP.cpp
index df7d9bc0d8c8..061721dfb8da 100644
--- a/lib/Parse/ParseOpenMP.cpp
+++ b/lib/Parse/ParseOpenMP.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "RAIIObjectsForParser.h"
-#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -40,7 +39,9 @@ enum OpenMPDirectiveKindEx {
OMPD_target_enter,
OMPD_target_exit,
OMPD_update,
- OMPD_distribute_parallel
+ OMPD_distribute_parallel,
+ OMPD_teams_distribute_parallel,
+ OMPD_target_teams_distribute_parallel
};
class ThreadprivateListParserHelper final {
@@ -107,8 +108,18 @@ static OpenMPDirectiveKind ParseOpenMPDirectiveKind(Parser &P) {
{ OMPD_parallel, OMPD_sections, OMPD_parallel_sections },
{ OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd },
{ OMPD_target, OMPD_parallel, OMPD_target_parallel },
+ { OMPD_target, OMPD_simd, OMPD_target_simd },
{ OMPD_target_parallel, OMPD_for, OMPD_target_parallel_for },
- { OMPD_target_parallel_for, OMPD_simd, OMPD_target_parallel_for_simd }
+ { OMPD_target_parallel_for, OMPD_simd, OMPD_target_parallel_for_simd },
+ { OMPD_teams, OMPD_distribute, OMPD_teams_distribute },
+ { OMPD_teams_distribute, OMPD_simd, OMPD_teams_distribute_simd },
+ { OMPD_teams_distribute, OMPD_parallel, OMPD_teams_distribute_parallel },
+ { OMPD_teams_distribute_parallel, OMPD_for, OMPD_teams_distribute_parallel_for },
+ { OMPD_teams_distribute_parallel_for, OMPD_simd, OMPD_teams_distribute_parallel_for_simd },
+ { OMPD_target, OMPD_teams, OMPD_target_teams },
+ { OMPD_target_teams, OMPD_distribute, OMPD_target_teams_distribute },
+ { OMPD_target_teams_distribute, OMPD_parallel, OMPD_target_teams_distribute_parallel },
+ { OMPD_target_teams_distribute_parallel, OMPD_for, OMPD_target_teams_distribute_parallel_for }
};
enum { CancellationPoint = 0, DeclareReduction = 1, TargetData = 2 };
auto Tok = P.getCurToken();
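These table rows assemble the new OpenMP 4.5 combined directives one token at a time; for instance (arrays and bound illustrative):

    #pragma omp target teams distribute parallel for
    for (int i = 0; i < n; ++i) a[i] += b[i];

    #pragma omp target simd
    for (int i = 0; i < n; ++i) c[i] = a[i] * b[i];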
@@ -609,7 +620,6 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
if (AS == AS_none) {
assert(TagType == DeclSpec::TST_unspecified);
MaybeParseCXX11Attributes(Attrs);
- MaybeParseMicrosoftAttributes(Attrs);
ParsingDeclSpec PDS(*this);
Ptr = ParseExternalDeclaration(Attrs, &PDS);
} else {
@@ -671,7 +681,6 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Tok.isNot(tok::eof) && Tok.isNot(tok::r_brace)) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftAttributes(attrs);
ParseExternalDeclaration(attrs);
if (Tok.isAnnotation() && Tok.is(tok::annot_pragma_openmp)) {
TentativeParsingAction TPA(*this);
@@ -741,6 +750,14 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_distribute_parallel_for_simd:
case OMPD_distribute_simd:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_target_teams:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_parallel_for:
Diag(Tok, diag::err_omp_unexpected_directive)
<< getOpenMPDirectiveName(DKind);
break;
@@ -774,7 +791,12 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// 'target parallel' | 'target parallel for' |
/// 'target update' | 'distribute parallel for' |
/// 'distribute parallel for simd' | 'distribute simd' |
-/// 'target parallel for simd' {clause}
+/// 'target parallel for simd' | 'target simd' |
+/// 'teams distribute' | 'teams distribute simd' |
+/// 'teams distribute parallel for simd' |
+/// 'teams distribute parallel for' | 'target teams' |
+/// 'target teams distribute' |
+/// 'target teams distribute parallel for' {clause}
/// annot_pragma_openmp_end
///
StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
@@ -882,7 +904,15 @@ StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_distribute_simd:
- case OMPD_target_parallel_for_simd: {
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_target_teams:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_parallel_for: {
ConsumeToken();
// Parse directive name of the 'critical' directive if any.
if (DKind == OMPD_critical) {
@@ -1441,15 +1471,19 @@ OMPClause *Parser::ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind) {
} else {
assert(Kind == OMPC_if);
KLoc.push_back(Tok.getLocation());
+ TentativeParsingAction TPA(*this);
Arg.push_back(ParseOpenMPDirectiveKind(*this));
if (Arg.back() != OMPD_unknown) {
ConsumeToken();
- if (Tok.is(tok::colon))
+ if (Tok.is(tok::colon) && getLangOpts().OpenMP > 40) {
+ TPA.Commit();
DelimLoc = ConsumeToken();
- else
- Diag(Tok, diag::warn_pragma_expected_colon)
- << "directive name modifier";
- }
+ } else {
+ TPA.Revert();
+ Arg.back() = OMPD_unknown;
+ }
+ } else
+ TPA.Revert();
}
bool NeedAnExpression = (Kind == OMPC_schedule && DelimLoc.isValid()) ||
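[Editor's note] The tentative parse above matters because, before OpenMP 4.5 (LangOpts.OpenMP <= 40), a directive-name-modifier is not valid syntax and the tokens must be re-parsed as a plain condition. A sketch of the 4.5 form this accepts (names are illustrative):

    // OpenMP 4.5: the directive-name-modifier selects which constituent
    // construct of a combined directive the condition applies to.
    #pragma omp target parallel if(target: use_device) if(parallel: n > 64)
    {
      work(n);
    }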
diff --git a/lib/Parse/ParsePragma.cpp b/lib/Parse/ParsePragma.cpp
index bff5d1170fe0..2dc6a0739bc8 100644
--- a/lib/Parse/ParsePragma.cpp
+++ b/lib/Parse/ParsePragma.cpp
@@ -161,6 +161,22 @@ struct PragmaMSRuntimeChecksHandler : public EmptyPragmaHandler {
PragmaMSRuntimeChecksHandler() : EmptyPragmaHandler("runtime_checks") {}
};
+struct PragmaMSIntrinsicHandler : public PragmaHandler {
+ PragmaMSIntrinsicHandler() : PragmaHandler("intrinsic") {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+};
+
+struct PragmaForceCUDAHostDeviceHandler : public PragmaHandler {
+ PragmaForceCUDAHostDeviceHandler(Sema &Actions)
+ : PragmaHandler("force_cuda_host_device"), Actions(Actions) {}
+ void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) override;
+
+private:
+ Sema &Actions;
+};
+
} // end namespace
void Parser::initializePragmaHandlers() {
@@ -229,6 +245,14 @@ void Parser::initializePragmaHandlers() {
PP.AddPragmaHandler(MSSection.get());
MSRuntimeChecks.reset(new PragmaMSRuntimeChecksHandler());
PP.AddPragmaHandler(MSRuntimeChecks.get());
+ MSIntrinsic.reset(new PragmaMSIntrinsicHandler());
+ PP.AddPragmaHandler(MSIntrinsic.get());
+ }
+
+ if (getLangOpts().CUDA) {
+ CUDAForceHostDeviceHandler.reset(
+ new PragmaForceCUDAHostDeviceHandler(Actions));
+ PP.AddPragmaHandler("clang", CUDAForceHostDeviceHandler.get());
}
OptimizeHandler.reset(new PragmaOptimizeHandler(Actions));
@@ -297,6 +321,13 @@ void Parser::resetPragmaHandlers() {
MSSection.reset();
PP.RemovePragmaHandler(MSRuntimeChecks.get());
MSRuntimeChecks.reset();
+ PP.RemovePragmaHandler(MSIntrinsic.get());
+ MSIntrinsic.reset();
+ }
+
+ if (getLangOpts().CUDA) {
+ PP.RemovePragmaHandler("clang", CUDAForceHostDeviceHandler.get());
+ CUDAForceHostDeviceHandler.reset();
}
PP.RemovePragmaHandler("STDC", FPContractHandler.get());
@@ -455,42 +486,48 @@ StmtResult Parser::HandlePragmaCaptured()
}
namespace {
- typedef llvm::PointerIntPair<IdentifierInfo *, 1, bool> OpenCLExtData;
+ enum OpenCLExtState : char {
+ Disable, Enable, Begin, End
+ };
+ typedef std::pair<const IdentifierInfo *, OpenCLExtState> OpenCLExtData;
}
void Parser::HandlePragmaOpenCLExtension() {
assert(Tok.is(tok::annot_pragma_opencl_extension));
- OpenCLExtData data =
- OpenCLExtData::getFromOpaqueValue(Tok.getAnnotationValue());
- unsigned state = data.getInt();
- IdentifierInfo *ename = data.getPointer();
+ OpenCLExtData *Data = static_cast<OpenCLExtData*>(Tok.getAnnotationValue());
+ auto State = Data->second;
+ auto Ident = Data->first;
SourceLocation NameLoc = Tok.getLocation();
ConsumeToken(); // The annotation token.
- OpenCLOptions &f = Actions.getOpenCLOptions();
- auto CLVer = getLangOpts().OpenCLVersion;
- auto &Supp = getTargetInfo().getSupportedOpenCLOpts();
+ auto &Opt = Actions.getOpenCLOptions();
+ auto Name = Ident->getName();
// OpenCL 1.1 9.1: "The all variant sets the behavior for all extensions,
// overriding all previously issued extension directives, but only if the
// behavior is set to disable."
- if (state == 0 && ename->isStr("all")) {
-#define OPENCLEXT(nm) \
- if (Supp.is_##nm##_supported_extension(CLVer)) \
- f.nm = 0;
-#include "clang/Basic/OpenCLExtensions.def"
- }
-#define OPENCLEXT(nm) else if (ename->isStr(#nm)) \
- if (Supp.is_##nm##_supported_extension(CLVer)) \
- f.nm = state; \
- else if (Supp.is_##nm##_supported_core(CLVer)) \
- PP.Diag(NameLoc, diag::warn_pragma_extension_is_core) << ename; \
- else \
- PP.Diag(NameLoc, diag::warn_pragma_unsupported_extension) << ename;
-#include "clang/Basic/OpenCLExtensions.def"
- else {
- PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << ename;
- return;
- }
+ if (Name == "all") {
+ if (State == Disable)
+ Opt.disableAll();
+ else
+ PP.Diag(NameLoc, diag::warn_pragma_expected_predicate) << 1;
+ } else if (State == Begin) {
+ if (!Opt.isKnown(Name) ||
+ !Opt.isSupported(Name, getLangOpts().OpenCLVersion)) {
+ Opt.support(Name);
+ }
+ Actions.setCurrentOpenCLExtension(Name);
+ } else if (State == End) {
+ if (Name != Actions.getCurrentOpenCLExtension())
+ PP.Diag(NameLoc, diag::warn_pragma_begin_end_mismatch);
+ Actions.setCurrentOpenCLExtension("");
+ } else if (!Opt.isKnown(Name))
+ PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << Ident;
+ else if (Opt.isSupportedExtension(Name, getLangOpts().OpenCLVersion))
+ Opt.enable(Name, State == Enable);
+ else if (Opt.isSupportedCore(Name, getLangOpts().OpenCLVersion))
+ PP.Diag(NameLoc, diag::warn_pragma_extension_is_core) << Ident;
+ else
+ PP.Diag(NameLoc, diag::warn_pragma_unsupported_extension) << Ident;
}
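[Editor's note] The handler now distinguishes four states rather than a boolean. A hedged OpenCL C sketch of the accepted forms (my_ext is a hypothetical extension name):

    #pragma OPENCL EXTENSION cl_khr_fp64 : enable
    double d;                          // OK while the extension is enabled

    #pragma OPENCL EXTENSION my_ext : begin
    void my_ext_function(void);        // declarations guarded by my_ext
    #pragma OPENCL EXTENSION my_ext : end

    #pragma OPENCL EXTENSION all : disable   // only 'disable' is valid for 'all'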
void Parser::HandlePragmaMSPointersToMembers() {
@@ -1410,29 +1447,34 @@ PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
"OPENCL";
return;
}
- IdentifierInfo *ename = Tok.getIdentifierInfo();
+ IdentifierInfo *Ext = Tok.getIdentifierInfo();
SourceLocation NameLoc = Tok.getLocation();
PP.Lex(Tok);
if (Tok.isNot(tok::colon)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_colon) << ename;
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_colon) << Ext;
return;
}
PP.Lex(Tok);
if (Tok.isNot(tok::identifier)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_predicate) << 0;
return;
}
- IdentifierInfo *op = Tok.getIdentifierInfo();
-
- unsigned state;
- if (op->isStr("enable")) {
- state = 1;
- } else if (op->isStr("disable")) {
- state = 0;
- } else {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ IdentifierInfo *Pred = Tok.getIdentifierInfo();
+
+ OpenCLExtState State;
+ if (Pred->isStr("enable")) {
+ State = Enable;
+ } else if (Pred->isStr("disable")) {
+ State = Disable;
+ } else if (Pred->isStr("begin"))
+ State = Begin;
+ else if (Pred->isStr("end"))
+ State = End;
+ else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_predicate)
+ << Ext->isStr("all");
return;
}
SourceLocation StateLoc = Tok.getLocation();
@@ -1444,19 +1486,21 @@ PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
return;
}
- OpenCLExtData data(ename, state);
+ auto Info = PP.getPreprocessorAllocator().Allocate<OpenCLExtData>(1);
+ Info->first = Ext;
+ Info->second = State;
MutableArrayRef<Token> Toks(PP.getPreprocessorAllocator().Allocate<Token>(1),
1);
Toks[0].startToken();
Toks[0].setKind(tok::annot_pragma_opencl_extension);
Toks[0].setLocation(NameLoc);
- Toks[0].setAnnotationValue(data.getOpaqueValue());
+ Toks[0].setAnnotationValue(static_cast<void*>(Info));
Toks[0].setAnnotationEndLoc(StateLoc);
PP.EnterTokenStream(Toks, /*DisableMacroExpansion=*/true);
if (PP.getPPCallbacks())
- PP.getPPCallbacks()->PragmaOpenCLExtension(NameLoc, ename,
- StateLoc, state);
+ PP.getPPCallbacks()->PragmaOpenCLExtension(NameLoc, Ext,
+ StateLoc, State);
}
/// \brief Handle '#pragma omp ...' when OpenMP is disabled.
@@ -2127,3 +2171,76 @@ void PragmaUnrollHintHandler::HandlePragma(Preprocessor &PP,
PP.EnterTokenStream(std::move(TokenArray), 1,
/*DisableMacroExpansion=*/false);
}
+
+/// \brief Handle the Microsoft \#pragma intrinsic extension.
+///
+/// The syntax is:
+/// \code
+/// #pragma intrinsic(memset)
+/// #pragma intrinsic(strlen, memcpy)
+/// \endcode
+///
+/// Pragma intrinsic tells the compiler to use a builtin version of the
+/// function. Clang already does this by default, so the pragma has no real
+/// effect. However, we emit a warning if the function named in \#pragma
+/// intrinsic is not a builtin in clang, and suggest including intrin.h.
+void PragmaMSIntrinsicHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen)
+ << "intrinsic";
+ return;
+ }
+ PP.Lex(Tok);
+
+ bool SuggestIntrinH = !PP.isMacroDefined("__INTRIN_H");
+
+ while (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II->getBuiltinID())
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_intrinsic_builtin)
+ << II << SuggestIntrinH;
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::comma))
+ break;
+ PP.Lex(Tok);
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen)
+ << "intrinsic";
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "intrinsic";
+}
+void PragmaForceCUDAHostDeviceHandler::HandlePragma(
+ Preprocessor &PP, PragmaIntroducerKind Introducer, Token &Tok) {
+ Token FirstTok = Tok;
+
+ PP.Lex(Tok);
+ IdentifierInfo *Info = Tok.getIdentifierInfo();
+ if (!Info || (!Info->isStr("begin") && !Info->isStr("end"))) {
+ PP.Diag(FirstTok.getLocation(),
+ diag::warn_pragma_force_cuda_host_device_bad_arg);
+ return;
+ }
+
+ if (Info->isStr("begin"))
+ Actions.PushForceCUDAHostDevice();
+ else if (!Actions.PopForceCUDAHostDevice())
+ PP.Diag(FirstTok.getLocation(),
+ diag::err_pragma_cannot_end_force_cuda_host_device);
+
+ PP.Lex(Tok);
+ if (!Tok.is(tok::eod))
+ PP.Diag(FirstTok.getLocation(),
+ diag::warn_pragma_force_cuda_host_device_bad_arg);
+}
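[Editor's note] A minimal sketch of the pragma this handler accepts, assuming a CUDA compilation; declarations between begin/end behave as if marked __host__ __device__:

    #pragma clang force_cuda_host_device begin
    int add(int a, int b) { return a + b; }   // implicitly __host__ __device__
    #pragma clang force_cuda_host_device end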
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index fa8eb12044be..30e392fa3c94 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -12,18 +12,15 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "RAIIObjectsForParser.h"
-#include "clang/AST/ASTContext.h"
#include "clang/Basic/Attributes.h"
-#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Parse/Parser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/LoopHint.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
-#include "llvm/ADT/SmallString.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -209,7 +206,8 @@ Retry:
}
default: {
- if ((getLangOpts().CPlusPlus || Allowed == ACK_Any) &&
+ if ((getLangOpts().CPlusPlus || getLangOpts().MicrosoftExt ||
+ Allowed == ACK_Any) &&
isDeclarationStatement()) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Decl = ParseDeclaration(Declarator::BlockContext,
@@ -398,6 +396,8 @@ StmtResult Parser::ParseExprStatement() {
// If a case keyword is missing, this is where it should be inserted.
Token OldToken = Tok;
+ ExprStatementTokLoc = Tok.getLocation();
+
// expression[opt] ';'
ExprResult Expr(ParseExpression());
if (Expr.isInvalid()) {
diff --git a/lib/Parse/ParseStmtAsm.cpp b/lib/Parse/ParseStmtAsm.cpp
index 1f63dc257b86..293de78505ef 100644
--- a/lib/Parse/ParseStmtAsm.cpp
+++ b/lib/Parse/ParseStmtAsm.cpp
@@ -681,12 +681,12 @@ StmtResult Parser::ParseAsmStatement(bool &msAsm) {
// GNU asms accept, but warn, about type-qualifiers other than volatile.
if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
- Diag(Loc, diag::w_asm_qualifier_ignored) << "const";
+ Diag(Loc, diag::warn_asm_qualifier_ignored) << "const";
if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
- Diag(Loc, diag::w_asm_qualifier_ignored) << "restrict";
+ Diag(Loc, diag::warn_asm_qualifier_ignored) << "restrict";
// FIXME: Once GCC supports _Atomic, check whether it permits it here.
if (DS.getTypeQualifiers() & DeclSpec::TQ_atomic)
- Diag(Loc, diag::w_asm_qualifier_ignored) << "_Atomic";
+ Diag(Loc, diag::warn_asm_qualifier_ignored) << "_Atomic";
// Remember if this was a volatile asm.
bool isVolatile = DS.getTypeQualifiers() & DeclSpec::TQ_volatile;
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 6cf7b6d3dc55..6a09ea7abca0 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -11,12 +11,11 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "RAIIObjectsForParser.h"
-#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
@@ -197,9 +196,12 @@ Parser::ParseSingleDeclarationAfterTemplate(
ParsedAttributesWithRange prefixAttrs(AttrFactory);
MaybeParseCXX11Attributes(prefixAttrs);
- if (Tok.is(tok::kw_using))
- return ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
- prefixAttrs);
+ if (Tok.is(tok::kw_using)) {
+ // FIXME: We should return the DeclGroup to the caller.
+ ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
+ prefixAttrs);
+ return nullptr;
+ }
// Parse the declaration specifiers, stealing any diagnostics from
// the template parameters.
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index 7703c33b8780..0ea3f8d95179 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -74,11 +74,18 @@ bool Parser::isCXXDeclarationStatement() {
///
/// simple-declaration:
/// decl-specifier-seq init-declarator-list[opt] ';'
+/// decl-specifier-seq ref-qualifier[opt] '[' identifier-list ']'
+/// brace-or-equal-initializer ';' [C++17]
///
/// (if AllowForRangeDecl specified)
/// for ( for-range-declaration : for-range-initializer ) statement
+///
/// for-range-declaration:
-/// attribute-specifier-seqopt type-specifier-seq declarator
+/// decl-specifier-seq declarator
+/// decl-specifier-seq ref-qualifier[opt] '[' identifier-list ']'
+///
+/// In any of the above cases there can be a preceding attribute-specifier-seq,
+/// but the caller is expected to handle that.
bool Parser::isCXXSimpleDeclaration(bool AllowForRangeDecl) {
// C++ 6.8p1:
// There is an ambiguity in the grammar involving expression-statements and
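[Editor's note] A hedged illustration of the two new C++17 alternatives the tentative parser must now classify as simple-declarations:

    #include <map>
    #include <utility>

    void demo(std::map<int, int> &m) {
      auto [x, y] = std::make_pair(1, 2);   // decl-specifier-seq '[' id-list ']' init
      for (auto &[key, value] : m)          // for-range-declaration form
        value += key + x + y;
    }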
@@ -902,7 +909,7 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
// '(' abstract-declarator ')'
if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec, tok::kw___cdecl,
tok::kw___stdcall, tok::kw___fastcall, tok::kw___thiscall,
- tok::kw___vectorcall))
+ tok::kw___regcall, tok::kw___vectorcall))
return TPResult::True; // attributes indicate declaration
TPResult TPR = TryParseDeclarator(mayBeAbstract, mayHaveIdentifier);
if (TPR != TPResult::Ambiguous)
@@ -1051,6 +1058,7 @@ Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___regcall:
case tok::kw___vectorcall:
case tok::kw___unaligned:
case tok::kw___vector:
@@ -1344,6 +1352,7 @@ Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult,
case tok::kw___stdcall:
case tok::kw___fastcall:
case tok::kw___thiscall:
+ case tok::kw___regcall:
case tok::kw___vectorcall:
case tok::kw___w64:
case tok::kw___sptr:
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index f968f995d53f..d8a4ea63153a 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -20,7 +20,6 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
-#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -78,7 +77,6 @@ Parser::Parser(Preprocessor &pp, Sema &actions, bool skipFunctionBodies)
Tok.setKind(tok::eof);
Actions.CurScope = nullptr;
NumCachedScopes = 0;
- ParenCount = BracketCount = BraceCount = 0;
CurParsedObjCImpl = nullptr;
// Add #pragma handlers. These are removed and destroyed in the
@@ -474,6 +472,7 @@ void Parser::Initialize() {
Ident_final = nullptr;
Ident_sealed = nullptr;
Ident_override = nullptr;
+ Ident_GNU_final = nullptr;
Ident_super = &PP.getIdentifierTable().get("super");
@@ -537,6 +536,36 @@ void Parser::LateTemplateParserCleanupCallback(void *P) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(((Parser *)P)->TemplateIds);
}
+bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
+ // C++ Modules TS: module-declaration must be the first declaration in the
+ // file. (There can be no preceding preprocessor directives, but we expect
+ // the lexer to check that.)
+ if (Tok.is(tok::kw_module)) {
+ Result = ParseModuleDecl();
+ return false;
+ } else if (getLangOpts().getCompilingModule() ==
+ LangOptions::CMK_ModuleInterface) {
+ // FIXME: We avoid providing this diagnostic when generating an object file
+ // from an existing PCM file. This is not a good way to detect this
+ // condition; we should provide a mechanism to indicate whether we've
+ // already parsed a declaration in this translation unit and avoid calling
+ // ParseFirstTopLevelDecl in that case.
+ if (Actions.TUKind == TU_Module)
+ Diag(Tok, diag::err_expected_module_interface_decl);
+ }
+
+ // C11 6.9p1 says translation units must have at least one top-level
+ // declaration. C++ doesn't have this restriction. We also don't want to
+ // complain if we have a precompiled header, although technically if the PCH
+ // is empty we should still emit the (pedantic) diagnostic.
+ bool NoTopLevelDecls = ParseTopLevelDecl(Result);
+ if (NoTopLevelDecls && !Actions.getASTContext().getExternalSource() &&
+ !getLangOpts().CPlusPlus)
+ Diag(diag::ext_empty_translation_unit);
+
+ return NoTopLevelDecls;
+}
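[Editor's note] Two situations this helper covers, sketched under assumed driver flags:

    // interface.cppm, built as a module interface (e.g. -fmodules-ts):
    module foo;          // must be the very first declaration in the file
    export int f();

    // empty.c, built with -pedantic: a C translation unit containing no
    // top-level declarations draws ext_empty_translation_unit; C++ and
    // TUs backed by an external source are exempt, per the comment above.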
+
/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
/// action tells us to. This returns true if the EOF was encountered.
bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
@@ -553,6 +582,10 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
HandlePragmaUnused();
return false;
+ case tok::kw_import:
+ Result = ParseModuleImport(SourceLocation());
+ return false;
+
case tok::annot_module_include:
Actions.ActOnModuleInclude(Tok.getLocation(),
reinterpret_cast<Module *>(
@@ -590,7 +623,6 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftAttributes(attrs);
Result = ParseExternalDeclaration(attrs);
return false;
@@ -737,11 +769,17 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
: Sema::PCC_Namespace);
cutOffParsing();
return nullptr;
+ case tok::kw_export:
+ if (getLangOpts().ModulesTS) {
+ SingleDecl = ParseExportDeclaration();
+ break;
+ }
+ // This must be 'export template'. Parse it so we can diagnose our lack
+ // of support.
case tok::kw_using:
case tok::kw_namespace:
case tok::kw_typedef:
case tok::kw_template:
- case tok::kw_export: // As in 'export template'
case tok::kw_static_assert:
case tok::kw__Static_assert:
// A function definition cannot start with any of these keywords.
@@ -802,6 +840,11 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParseMicrosoftIfExistsExternalDeclaration();
return nullptr;
+ case tok::kw_module:
+ Diag(Tok, diag::err_unexpected_module_decl);
+ SkipUntil(tok::semi);
+ return nullptr;
+
default:
dont_know:
// We can't tell whether this is a function-definition or declaration yet.
@@ -853,11 +896,10 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
Tok.is(tok::kw_try); // X() try { ... }
}
-/// ParseDeclarationOrFunctionDefinition - Parse either a function-definition or
-/// a declaration. We can't tell which we have until we read up to the
-/// compound-statement in function-definition. TemplateParams, if
-/// non-NULL, provides the template parameters when we're parsing a
-/// C++ template-declaration.
+/// Parse either a function-definition or a declaration. We can't tell which
+/// we have until we read up to the compound-statement in function-definition.
+/// TemplateParams, if non-NULL, provides the template parameters when we're
+/// parsing a C++ template-declaration.
///
/// function-definition: [C99 6.9.1]
/// decl-specs declarator declaration-list[opt] compound-statement
@@ -873,6 +915,7 @@ Parser::DeclGroupPtrTy
Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS) {
+ MaybeParseMicrosoftAttributes(DS.getAttributes());
// Parse the common declaration-specifiers piece.
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC_top_level);
@@ -891,6 +934,8 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
DS, AnonRecord);
DS.complete(TheDecl);
+ if (getLangOpts().OpenCL)
+ Actions.setCurrentOpenCLExtensionForDecl(TheDecl);
if (AnonRecord) {
Decl* decls[] = {AnonRecord, TheDecl};
return Actions.BuildDeclaratorGroup(decls, /*TypeMayContainAuto=*/false);
@@ -952,7 +997,7 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
// parsing c constructs and re-enter objc container scope
// afterwards.
ObjCDeclContextSwitch ObjCDC(*this);
-
+
return ParseDeclOrFunctionDefInternal(attrs, PDS, AS);
}
}
@@ -1495,6 +1540,8 @@ Parser::TryAnnotateName(bool IsAddressOfOperand,
NewEndLoc);
if (NewType.isUsable())
Ty = NewType.get();
+ else if (Tok.is(tok::eof)) // Nothing to do here, bail out...
+ return ANK_Error;
}
Tok.setKind(tok::annot_typename);
@@ -1726,6 +1773,8 @@ bool Parser::TryAnnotateTypeOrScopeTokenAfterScopeSpec(bool EnteringContext,
NewEndLoc);
if (NewType.isUsable())
Ty = NewType.get();
+ else if (Tok.is(tok::eof)) // Nothing to do here, bail out...
+ return false;
}
// This is a typename. Replace the current token in-place with an
@@ -1988,7 +2037,6 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftAttributes(attrs);
DeclGroupPtrTy Result = ParseExternalDeclaration(attrs);
if (Result && !getCurScope()->getParent())
Actions.getASTConsumer().HandleTopLevelDecl(Result.get());
@@ -1996,51 +2044,122 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
Braces.consumeClose();
}
+/// Parse a C++ Modules TS module declaration, which appears at the beginning
+/// of a module interface, module partition, or module implementation file.
+///
+/// module-declaration: [Modules TS + P0273R0]
+/// 'module' module-kind[opt] module-name attribute-specifier-seq[opt] ';'
+/// module-kind:
+/// 'implementation'
+/// 'partition'
+///
+/// Note that the module-kind values are context-sensitive keywords.
+Parser::DeclGroupPtrTy Parser::ParseModuleDecl() {
+ assert(Tok.is(tok::kw_module) && getLangOpts().ModulesTS &&
+ "should not be parsing a module declaration");
+ SourceLocation ModuleLoc = ConsumeToken();
+
+ // Check for a module-kind.
+ Sema::ModuleDeclKind MDK = Sema::ModuleDeclKind::Module;
+ if (Tok.is(tok::identifier) && NextToken().is(tok::identifier)) {
+ if (Tok.getIdentifierInfo()->isStr("implementation"))
+ MDK = Sema::ModuleDeclKind::Implementation;
+ else if (Tok.getIdentifierInfo()->isStr("partition"))
+ MDK = Sema::ModuleDeclKind::Partition;
+ else {
+ Diag(Tok, diag::err_unexpected_module_kind) << Tok.getIdentifierInfo();
+ SkipUntil(tok::semi);
+ return nullptr;
+ }
+ ConsumeToken();
+ }
+
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ if (ParseModuleName(ModuleLoc, Path, /*IsImport*/false))
+ return nullptr;
+
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ // We don't support any module attributes yet.
+ ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr);
+
+ ExpectAndConsumeSemi(diag::err_module_expected_semi);
+
+ return Actions.ActOnModuleDecl(ModuleLoc, MDK, Path);
+}
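[Editor's note] The three declaration forms the grammar above admits, as a sketch (foo.bar is an illustrative module name):

    module foo.bar;                 // module interface unit
    module implementation foo.bar;  // module implementation unit
    module partition foo.bar;       // module partition (Modules TS + P0273R0)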
+
+/// Parse a module import declaration. This is essentially the same for
+/// Objective-C and the C++ Modules TS, except for the leading '@' (in ObjC)
+/// and the trailing optional attributes (in C++).
+///
+/// [ObjC] @import declaration:
+/// '@' 'import' module-name ';'
+/// [ModTS] module-import-declaration:
+/// 'import' module-name attribute-specifier-seq[opt] ';'
Parser::DeclGroupPtrTy Parser::ParseModuleImport(SourceLocation AtLoc) {
- assert(Tok.isObjCAtKeyword(tok::objc_import) &&
+ assert((AtLoc.isInvalid() ? Tok.is(tok::kw_import)
+ : Tok.isObjCAtKeyword(tok::objc_import)) &&
"Improper start to module import");
SourceLocation ImportLoc = ConsumeToken();
+ SourceLocation StartLoc = AtLoc.isInvalid() ? ImportLoc : AtLoc;
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
-
+ if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ return nullptr;
+
+ ParsedAttributesWithRange Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ // We don't support any module import attributes yet.
+ ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_import_attr);
+
+ if (PP.hadModuleLoaderFatalFailure()) {
+ // With a fatal failure in the module loader, we abort parsing.
+ cutOffParsing();
+ return nullptr;
+ }
+
+ DeclResult Import = Actions.ActOnModuleImport(StartLoc, ImportLoc, Path);
+ ExpectAndConsumeSemi(diag::err_module_expected_semi);
+ if (Import.isInvalid())
+ return nullptr;
+
+ return Actions.ConvertDeclToDeclGroup(Import.get());
+}
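[Editor's note] The two surface forms that funnel through this function, per the doc comment above:

    import foo.bar;        // Modules TS; optional attributes may precede ';'
    @import Foundation;    // Objective-C; reaches here with a valid AtLoc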
+
+/// Parse a C++ Modules TS / Objective-C module name (both forms use the same
+/// grammar).
+///
+/// module-name:
+/// module-name-qualifier[opt] identifier
+/// module-name-qualifier:
+/// module-name-qualifier[opt] identifier '.'
+bool Parser::ParseModuleName(
+ SourceLocation UseLoc,
+ SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
+ bool IsImport) {
// Parse the module path.
- do {
+ while (true) {
if (!Tok.is(tok::identifier)) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteModuleImport(ImportLoc, Path);
+ Actions.CodeCompleteModuleImport(UseLoc, Path);
cutOffParsing();
- return nullptr;
+ return true;
}
- Diag(Tok, diag::err_module_expected_ident);
+ Diag(Tok, diag::err_module_expected_ident) << IsImport;
SkipUntil(tok::semi);
- return nullptr;
+ return true;
}
// Record this part of the module path.
Path.push_back(std::make_pair(Tok.getIdentifierInfo(), Tok.getLocation()));
ConsumeToken();
-
- if (Tok.is(tok::period)) {
- ConsumeToken();
- continue;
- }
-
- break;
- } while (true);
- if (PP.hadModuleLoaderFatalFailure()) {
- // With a fatal failure in the module loader, we abort parsing.
- cutOffParsing();
- return nullptr;
- }
-
- DeclResult Import = Actions.ActOnModuleImport(AtLoc, ImportLoc, Path);
- ExpectAndConsumeSemi(diag::err_module_expected_semi);
- if (Import.isInvalid())
- return nullptr;
+ if (Tok.isNot(tok::period))
+ return false;
- return Actions.ConvertDeclToDeclGroup(Import.get());
+ ConsumeToken();
+ }
}
/// \brief Try to recover the parser when a module annotation appears where it must not
@@ -2051,19 +2170,35 @@ bool Parser::parseMisplacedModuleImport() {
while (true) {
switch (Tok.getKind()) {
case tok::annot_module_end:
+ // If we recovered from a misplaced module begin, we expect to hit a
+ // misplaced module end too. Stay in the current context when this
+ // happens.
+ if (MisplacedModuleBeginCount) {
+ --MisplacedModuleBeginCount;
+ Actions.ActOnModuleEnd(Tok.getLocation(),
+ reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()));
+ ConsumeToken();
+ continue;
+ }
      // Inform the caller that recovery failed; the error must be handled at upper
- // level.
+ // level. This will generate the desired "missing '}' at end of module"
+ // diagnostics on the way out.
return true;
case tok::annot_module_begin:
- Actions.diagnoseMisplacedModuleImport(reinterpret_cast<Module *>(
- Tok.getAnnotationValue()), Tok.getLocation());
- return true;
+ // Recover by entering the module (Sema will diagnose).
+ Actions.ActOnModuleBegin(Tok.getLocation(),
+ reinterpret_cast<Module *>(
+ Tok.getAnnotationValue()));
+ ConsumeToken();
+ ++MisplacedModuleBeginCount;
+ continue;
case tok::annot_module_include:
// Module import found where it should not be, for instance, inside a
// namespace. Recover by importing the module.
Actions.ActOnModuleInclude(Tok.getLocation(),
reinterpret_cast<Module *>(
- Tok.getAnnotationValue()));
+ Tok.getAnnotationValue()));
ConsumeToken();
// If there is another module import, process it.
continue;
diff --git a/lib/Rewrite/HTMLRewrite.cpp b/lib/Rewrite/HTMLRewrite.cpp
index 2d82d8fd4bd1..27bb976a6e1a 100644
--- a/lib/Rewrite/HTMLRewrite.cpp
+++ b/lib/Rewrite/HTMLRewrite.cpp
@@ -267,8 +267,8 @@ void html::AddLineNumbers(Rewriter& R, FileID FID) {
RB.InsertTextAfter(FileEnd - FileBeg, "</table>");
}
-void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
- const char *title) {
+void html::AddHeaderFooterInternalBuiltinCSS(Rewriter &R, FileID FID,
+ StringRef title) {
const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
const char* FileStart = Buf->getBufferStart();
@@ -282,7 +282,7 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
os << "<!doctype html>\n" // Use HTML 5 doctype
"<html>\n<head>\n";
- if (title)
+ if (!title.empty())
os << "<title>" << html::EscapeText(title) << "</title>\n";
os << "<style type=\"text/css\">\n"
@@ -301,6 +301,7 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
" .macro:hover .expansion { display: block; border: 2px solid #FF0000; "
"padding: 2px; background-color:#FFF0F0; font-weight: normal; "
" -webkit-border-radius:5px; -webkit-box-shadow:1px 1px 7px #000; "
+ " border-radius:5px; box-shadow:1px 1px 7px #000; "
"position: absolute; top: -1em; left:10em; z-index: 1 } \n"
" .macro { color: darkmagenta; background-color:LemonChiffon;"
// Macros are position: relative to provide base for expansions.
@@ -311,7 +312,9 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
" .line { padding-left: 1ex; border-left: 3px solid #ccc }\n"
" .line { white-space: pre }\n"
" .msg { -webkit-box-shadow:1px 1px 7px #000 }\n"
+ " .msg { box-shadow:1px 1px 7px #000 }\n"
" .msg { -webkit-border-radius:5px }\n"
+ " .msg { border-radius:5px }\n"
" .msg { font-family:Helvetica, sans-serif; font-size:8pt }\n"
" .msg { float:left }\n"
" .msg { padding:0.25em 1ex 0.25em 1ex }\n"
@@ -321,11 +324,13 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
" .msgT { padding:0x; spacing:0x }\n"
" .msgEvent { background-color:#fff8b4; color:#000000 }\n"
" .msgControl { background-color:#bbbbbb; color:#000000 }\n"
+ " .msgNote { background-color:#ddeeff; color:#000000 }\n"
" .mrange { background-color:#dfddf3 }\n"
" .mrange { border-bottom:1px solid #6F9DBE }\n"
" .PathIndex { font-weight: bold; padding:0px 5px; "
"margin-right:5px; }\n"
" .PathIndex { -webkit-border-radius:8px }\n"
+ " .PathIndex { border-radius:8px }\n"
" .PathIndexEvent { background-color:#bfba87 }\n"
" .PathIndexControl { background-color:#8c8c8c }\n"
" .PathNav a { text-decoration:none; font-size: larger }\n"
@@ -339,8 +344,12 @@ void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
" border-collapse: collapse; border-spacing: 0px;\n"
" }\n"
" td.rowname {\n"
- " text-align:right; font-weight:bold; color:#444444;\n"
- " padding-right:2ex; }\n"
+ " text-align: right;\n"
+ " vertical-align: top;\n"
+ " font-weight: bold;\n"
+ " color:#444444;\n"
+ " padding-right:2ex;\n"
+ " }\n"
"</style>\n</head>\n<body>";
// Generate header
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index 67762bde3439..5953d020b4fb 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -37,12 +37,8 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
-#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
-#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -50,7 +46,6 @@
#include <algorithm>
#include <deque>
#include <iterator>
-#include <vector>
using namespace clang;
@@ -370,7 +365,7 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
CFGStmt CS = ri->castAs<CFGStmt>();
const Stmt *S = CS.getStmt();
- if (isa<ReturnStmt>(S)) {
+ if (isa<ReturnStmt>(S) || isa<CoreturnStmt>(S)) {
HasLiveReturn = true;
continue;
}
@@ -421,7 +416,7 @@ struct CheckFallThroughDiagnostics {
unsigned diag_AlwaysFallThrough_HasNoReturn;
unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
unsigned diag_NeverFallThroughOrReturn;
- enum { Function, Block, Lambda } funMode;
+ enum { Function, Block, Lambda, Coroutine } funMode;
SourceLocation FuncLoc;
static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
@@ -457,6 +452,19 @@ struct CheckFallThroughDiagnostics {
return D;
}
+ static CheckFallThroughDiagnostics MakeForCoroutine(const Decl *Func) {
+ CheckFallThroughDiagnostics D;
+ D.FuncLoc = Func->getLocation();
+ D.diag_MaybeFallThrough_HasNoReturn = 0;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::warn_maybe_falloff_nonvoid_coroutine;
+ D.diag_AlwaysFallThrough_HasNoReturn = 0;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::warn_falloff_nonvoid_coroutine;
+ D.funMode = Coroutine;
+ return D;
+ }
+
static CheckFallThroughDiagnostics MakeForBlock() {
CheckFallThroughDiagnostics D;
D.diag_MaybeFallThrough_HasNoReturn =
@@ -499,7 +507,13 @@ struct CheckFallThroughDiagnostics {
(!ReturnsVoid ||
D.isIgnored(diag::warn_suggest_noreturn_block, FuncLoc));
}
-
+ if (funMode == Coroutine) {
+ return (ReturnsVoid ||
+ D.isIgnored(diag::warn_maybe_falloff_nonvoid_function, FuncLoc) ||
+ D.isIgnored(diag::warn_maybe_falloff_nonvoid_coroutine,
+ FuncLoc)) &&
+ (!HasNoReturn);
+ }
// For blocks / lambdas.
return ReturnsVoid && !HasNoReturn;
}
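[Editor's note] A hedged sketch of what the new coroutine diagnostics fire on, assuming a user-provided task<int> coroutine type:

    task<int> f(bool b) {            // task<int> is hypothetical
      if (b)
        co_return 1;
      // Control can flow off the end of this non-void coroutine:
      // warn_maybe_falloff_nonvoid_coroutine.
    }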
@@ -519,11 +533,14 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
bool ReturnsVoid = false;
bool HasNoReturn = false;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- ReturnsVoid = FD->getReturnType()->isVoidType();
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *CBody = dyn_cast<CoroutineBodyStmt>(Body))
+ ReturnsVoid = CBody->getFallthroughHandler() != nullptr;
+ else
+ ReturnsVoid = FD->getReturnType()->isVoidType();
HasNoReturn = FD->isNoReturn();
}
- else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
ReturnsVoid = MD->getReturnType()->isVoidType();
HasNoReturn = MD->hasAttr<NoReturnAttr>();
}
@@ -1991,13 +2008,22 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
// Warning: check missing 'return'
if (P.enableCheckFallThrough) {
+ auto IsCoro = [&]() {
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->getBody() && isa<CoroutineBodyStmt>(FD->getBody()))
+ return true;
+ return false;
+ };
const CheckFallThroughDiagnostics &CD =
- (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
- : (isa<CXXMethodDecl>(D) &&
- cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
- cast<CXXMethodDecl>(D)->getParent()->isLambda())
- ? CheckFallThroughDiagnostics::MakeForLambda()
- : CheckFallThroughDiagnostics::MakeForFunction(D));
+ (isa<BlockDecl>(D)
+ ? CheckFallThroughDiagnostics::MakeForBlock()
+ : (isa<CXXMethodDecl>(D) &&
+ cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
+ cast<CXXMethodDecl>(D)->getParent()->isLambda())
+ ? CheckFallThroughDiagnostics::MakeForLambda()
+ : (IsCoro()
+ ? CheckFallThroughDiagnostics::MakeForCoroutine(D)
+ : CheckFallThroughDiagnostics::MakeForFunction(D)));
CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
}
diff --git a/lib/Sema/AttributeList.cpp b/lib/Sema/AttributeList.cpp
index cae9393f9f3a..55e9601bf5e5 100644
--- a/lib/Sema/AttributeList.cpp
+++ b/lib/Sema/AttributeList.cpp
@@ -20,7 +20,6 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringSwitch.h"
using namespace clang;
IdentifierLoc *IdentifierLoc::create(ASTContext &Ctx, SourceLocation Loc,
@@ -63,7 +62,7 @@ void *AttributeFactory::allocate(size_t size) {
}
// Otherwise, allocate something new.
- return Alloc.Allocate(size, llvm::AlignOf<AttributeFactory>::Alignment);
+ return Alloc.Allocate(size, alignof(AttributeFactory));
}
void AttributeFactory::reclaimPool(AttributeList *cur) {
diff --git a/lib/Sema/CodeCompleteConsumer.cpp b/lib/Sema/CodeCompleteConsumer.cpp
index 9a4f0d921bf4..f5b0104462f7 100644
--- a/lib/Sema/CodeCompleteConsumer.cpp
+++ b/lib/Sema/CodeCompleteConsumer.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/Sema.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
@@ -327,9 +328,9 @@ StringRef CodeCompletionTUInfo::getParentName(const DeclContext *DC) {
CodeCompletionString *CodeCompletionBuilder::TakeString() {
void *Mem = getAllocator().Allocate(
- sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size()
- + sizeof(const char *) * Annotations.size(),
- llvm::alignOf<CodeCompletionString>());
+ sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size() +
+ sizeof(const char *) * Annotations.size(),
+ alignof(CodeCompletionString));
CodeCompletionString *Result
= new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
Priority, Availability,
@@ -428,6 +429,26 @@ CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
CodeCompleteConsumer::~CodeCompleteConsumer() { }
+bool PrintingCodeCompleteConsumer::isResultFilteredOut(StringRef Filter,
+ CodeCompletionResult Result) {
+ switch (Result.Kind) {
+ case CodeCompletionResult::RK_Declaration: {
+ return !(Result.Declaration->getIdentifier() &&
+ Result.Declaration->getIdentifier()->getName().startswith(Filter));
+ }
+ case CodeCompletionResult::RK_Keyword: {
+ return !StringRef(Result.Keyword).startswith(Filter);
+ }
+ case CodeCompletionResult::RK_Macro: {
+ return !Result.Macro->getName().startswith(Filter);
+ }
+ case CodeCompletionResult::RK_Pattern: {
+ return !StringRef(Result.Pattern->getAsString()).startswith(Filter);
+ }
+ }
+ llvm_unreachable("Unknown code completion result Kind.");
+}
+
void
PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
CodeCompletionContext Context,
@@ -435,8 +456,12 @@ PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
unsigned NumResults) {
std::stable_sort(Results, Results + NumResults);
+ StringRef Filter = SemaRef.getPreprocessor().getCodeCompletionFilter();
+
// Print the results.
for (unsigned I = 0; I != NumResults; ++I) {
+ if (!Filter.empty() && isResultFilteredOut(Filter, Results[I]))
+ continue;
OS << "COMPLETION: ";
switch (Results[I].Kind) {
case CodeCompletionResult::RK_Declaration:
diff --git a/lib/Sema/DeclSpec.cpp b/lib/Sema/DeclSpec.cpp
index b9d2843b0558..a55cdcccee5d 100644
--- a/lib/Sema/DeclSpec.cpp
+++ b/lib/Sema/DeclSpec.cpp
@@ -173,6 +173,8 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
unsigned NumExceptions,
Expr *NoexceptExpr,
CachedTokens *ExceptionSpecTokens,
+ ArrayRef<NamedDecl*>
+ DeclsInPrototype,
SourceLocation LocalRangeBegin,
SourceLocation LocalRangeEnd,
Declarator &TheDeclarator,
@@ -204,7 +206,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Fun.ExceptionSpecType = ESpecType;
I.Fun.ExceptionSpecLocBeg = ESpecRange.getBegin().getRawEncoding();
I.Fun.ExceptionSpecLocEnd = ESpecRange.getEnd().getRawEncoding();
- I.Fun.NumExceptions = 0;
+ I.Fun.NumExceptionsOrDecls = 0;
I.Fun.Exceptions = nullptr;
I.Fun.NoexceptExpr = nullptr;
I.Fun.HasTrailingReturnType = TrailingReturnType.isUsable() ||
@@ -220,16 +222,18 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
// parameter list there (in an effort to avoid new/delete traffic). If it
// is already used (consider a function returning a function pointer) or too
// small (function with too many parameters), go to the heap.
- if (!TheDeclarator.InlineParamsUsed &&
+ if (!TheDeclarator.InlineStorageUsed &&
NumParams <= llvm::array_lengthof(TheDeclarator.InlineParams)) {
I.Fun.Params = TheDeclarator.InlineParams;
+ new (I.Fun.Params) ParamInfo[NumParams];
I.Fun.DeleteParams = false;
- TheDeclarator.InlineParamsUsed = true;
+ TheDeclarator.InlineStorageUsed = true;
} else {
I.Fun.Params = new DeclaratorChunk::ParamInfo[NumParams];
I.Fun.DeleteParams = true;
}
- memcpy(I.Fun.Params, Params, sizeof(Params[0]) * NumParams);
+ for (unsigned i = 0; i < NumParams; i++)
+ I.Fun.Params[i] = std::move(Params[i]);
}
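[Editor's note] The InlineParamsUsed -> InlineStorageUsed rename reflects that the declarator's inline buffer is now claimed by whichever client gets there first (function parameters here, decomposition bindings below). A generic sketch of the pattern, with hypothetical names:

    struct Owner {
      static const unsigned NumInline = 16;
      int InlineStorage[NumInline];   // element type is illustrative
      bool InlineStorageUsed = false;

      int *allocate(unsigned N, bool &NeedsDelete) {
        if (!InlineStorageUsed && N <= NumInline) {
          InlineStorageUsed = true;   // first client claims the buffer
          NeedsDelete = false;
          return InlineStorage;
        }
        NeedsDelete = true;           // taken or too small: fall back to heap
        return new int[N];
      }
    };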
// Check what exception specification information we should actually store.
@@ -238,7 +242,7 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
case EST_Dynamic:
// new[] an exception array if needed
if (NumExceptions) {
- I.Fun.NumExceptions = NumExceptions;
+ I.Fun.NumExceptionsOrDecls = NumExceptions;
I.Fun.Exceptions = new DeclaratorChunk::TypeAndRange[NumExceptions];
for (unsigned i = 0; i != NumExceptions; ++i) {
I.Fun.Exceptions[i].Ty = Exceptions[i];
@@ -255,9 +259,52 @@ DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto,
I.Fun.ExceptionSpecTokens = ExceptionSpecTokens;
break;
}
+
+ if (!DeclsInPrototype.empty()) {
+ assert(ESpecType == EST_None && NumExceptions == 0 &&
+ "cannot have exception specifiers and decls in prototype");
+ I.Fun.NumExceptionsOrDecls = DeclsInPrototype.size();
+ // Copy the array of decls into stable heap storage.
+ I.Fun.DeclsInPrototype = new NamedDecl *[DeclsInPrototype.size()];
+ for (size_t J = 0; J < DeclsInPrototype.size(); ++J)
+ I.Fun.DeclsInPrototype[J] = DeclsInPrototype[J];
+ }
+
return I;
}
+void Declarator::setDecompositionBindings(
+ SourceLocation LSquareLoc,
+ ArrayRef<DecompositionDeclarator::Binding> Bindings,
+ SourceLocation RSquareLoc) {
+ assert(!hasName() && "declarator given multiple names!");
+
+ BindingGroup.LSquareLoc = LSquareLoc;
+ BindingGroup.RSquareLoc = RSquareLoc;
+ BindingGroup.NumBindings = Bindings.size();
+ Range.setEnd(RSquareLoc);
+
+ // We're now past the identifier.
+ SetIdentifier(nullptr, LSquareLoc);
+ Name.EndLocation = RSquareLoc;
+
+ // Allocate storage for bindings and stash them away.
+ if (Bindings.size()) {
+ if (!InlineStorageUsed &&
+ Bindings.size() <= llvm::array_lengthof(InlineBindings)) {
+ BindingGroup.Bindings = InlineBindings;
+ BindingGroup.DeleteBindings = false;
+ InlineStorageUsed = true;
+ } else {
+ BindingGroup.Bindings =
+ new DecompositionDeclarator::Binding[Bindings.size()];
+ BindingGroup.DeleteBindings = true;
+ }
+ std::uninitialized_copy(Bindings.begin(), Bindings.end(),
+ BindingGroup.Bindings);
+ }
+}
+
bool Declarator::isDeclarationOfFunction() const {
for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
switch (DeclTypeInfo[i].Kind) {
@@ -511,7 +558,7 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
// OpenCL v1.2 s6.8 changes this to "The auto and register storage-class
// specifiers are not supported."
if (S.getLangOpts().OpenCL &&
- !S.getOpenCLOptions().cl_clang_storage_class_specifiers) {
+ !S.getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers")) {
switch (SC) {
case SCS_extern:
case SCS_private_extern:
@@ -578,14 +625,16 @@ bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
const char *&PrevSpec,
unsigned &DiagID,
const PrintingPolicy &Policy) {
- // Overwrite TSWLoc only if TypeSpecWidth was unspecified, so that
+ // Overwrite TSWRange.Begin only if TypeSpecWidth was unspecified, so that
// for 'long long' we will keep the source location of the first 'long'.
if (TypeSpecWidth == TSW_unspecified)
- TSWLoc = Loc;
+ TSWRange.setBegin(Loc);
// Allow turning long -> long long.
else if (W != TSW_longlong || TypeSpecWidth != TSW_long)
return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID);
TypeSpecWidth = W;
+ // Remember the location of the last 'long'.
+ TSWRange.setEnd(Loc);
return false;
}
@@ -965,9 +1014,9 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
TypeQualifiers)) {
const unsigned NumLocs = 9;
SourceLocation ExtraLocs[NumLocs] = {
- TSWLoc, TSCLoc, TSSLoc, AltiVecLoc,
- TQ_constLoc, TQ_restrictLoc, TQ_volatileLoc, TQ_atomicLoc, TQ_unalignedLoc
- };
+ TSWRange.getBegin(), TSCLoc, TSSLoc,
+ AltiVecLoc, TQ_constLoc, TQ_restrictLoc,
+ TQ_volatileLoc, TQ_atomicLoc, TQ_unalignedLoc};
FixItHint Hints[NumLocs];
SourceLocation FirstLoc;
for (unsigned I = 0; I != NumLocs; ++I) {
@@ -1009,8 +1058,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// Only 'short' and 'long long' are valid with vector bool. (PIM 2.1)
if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short) &&
(TypeSpecWidth != TSW_longlong))
- S.Diag(TSWLoc, diag::err_invalid_vector_bool_decl_spec)
- << getSpecifierName((TSW)TypeSpecWidth);
+ S.Diag(TSWRange.getBegin(), diag::err_invalid_vector_bool_decl_spec)
+ << getSpecifierName((TSW)TypeSpecWidth);
// vector bool long long requires VSX support or ZVector.
if ((TypeSpecWidth == TSW_longlong) &&
@@ -1027,7 +1076,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
// vector long double and vector long long double are never allowed.
// vector double is OK for Power7 and later, and ZVector.
if (TypeSpecWidth == TSW_long || TypeSpecWidth == TSW_longlong)
- S.Diag(TSWLoc, diag::err_invalid_vector_long_double_decl_spec);
+ S.Diag(TSWRange.getBegin(),
+ diag::err_invalid_vector_long_double_decl_spec);
else if (!S.Context.getTargetInfo().hasFeature("vsx") &&
!S.getLangOpts().ZVector)
S.Diag(TSTLoc, diag::err_invalid_vector_double_decl_spec);
@@ -1038,10 +1088,11 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
} else if (TypeSpecWidth == TSW_long) {
// vector long is unsupported for ZVector and deprecated for AltiVec.
if (S.getLangOpts().ZVector)
- S.Diag(TSWLoc, diag::err_invalid_vector_long_decl_spec);
+ S.Diag(TSWRange.getBegin(), diag::err_invalid_vector_long_decl_spec);
else
- S.Diag(TSWLoc, diag::warn_vector_long_decl_spec_combination)
- << getSpecifierName((TST)TypeSpecType, Policy);
+ S.Diag(TSWRange.getBegin(),
+ diag::warn_vector_long_decl_spec_combination)
+ << getSpecifierName((TST)TypeSpecType, Policy);
}
if (TypeAltiVecPixel) {
@@ -1074,8 +1125,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // short -> short int, long long -> long long int.
else if (TypeSpecType != TST_int) {
- S.Diag(TSWLoc, diag::err_invalid_width_spec) << (int)TypeSpecWidth
- << getSpecifierName((TST)TypeSpecType, Policy);
+ S.Diag(TSWRange.getBegin(), diag::err_invalid_width_spec)
+ << (int)TypeSpecWidth << getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecType = TST_int;
TypeSpecOwned = false;
}
@@ -1084,8 +1135,8 @@ void DeclSpec::Finish(Sema &S, const PrintingPolicy &Policy) {
if (TypeSpecType == TST_unspecified)
TypeSpecType = TST_int; // long -> long int.
else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
- S.Diag(TSWLoc, diag::err_invalid_width_spec) << (int)TypeSpecWidth
- << getSpecifierName((TST)TypeSpecType, Policy);
+ S.Diag(TSWRange.getBegin(), diag::err_invalid_width_spec)
+ << (int)TypeSpecWidth << getSpecifierName((TST)TypeSpecType, Policy);
TypeSpecType = TST_int;
TypeSpecOwned = false;
}
@@ -1267,6 +1318,7 @@ bool VirtSpecifiers::SetSpecifier(Specifier VS, SourceLocation Loc,
switch (VS) {
default: llvm_unreachable("Unknown specifier!");
case VS_Override: VS_overrideLoc = Loc; break;
+ case VS_GNU_Final:
case VS_Sealed:
case VS_Final: VS_finalLoc = Loc; break;
}
@@ -1279,6 +1331,7 @@ const char *VirtSpecifiers::getSpecifierName(Specifier VS) {
default: llvm_unreachable("Unknown specifier");
case VS_Override: return "override";
case VS_Final: return "final";
+ case VS_GNU_Final: return "__final";
case VS_Sealed: return "sealed";
}
}
diff --git a/lib/Sema/DelayedDiagnostic.cpp b/lib/Sema/DelayedDiagnostic.cpp
index ceea04f276c9..2fa5718d4e9b 100644
--- a/lib/Sema/DelayedDiagnostic.cpp
+++ b/lib/Sema/DelayedDiagnostic.cpp
@@ -20,7 +20,7 @@ using namespace clang;
using namespace sema;
DelayedDiagnostic
-DelayedDiagnostic::makeAvailability(Sema::AvailabilityDiagnostic AD,
+DelayedDiagnostic::makeAvailability(AvailabilityResult AR,
SourceLocation Loc,
const NamedDecl *D,
const ObjCInterfaceDecl *UnknownObjCClass,
@@ -28,42 +28,33 @@ DelayedDiagnostic::makeAvailability(Sema::AvailabilityDiagnostic AD,
StringRef Msg,
bool ObjCPropertyAccess) {
DelayedDiagnostic DD;
- switch (AD) {
- case Sema::AD_Deprecation:
- DD.Kind = Deprecation;
- break;
- case Sema::AD_Unavailable:
- DD.Kind = Unavailable;
- break;
- case Sema::AD_Partial:
- llvm_unreachable("AD_Partial diags should not be delayed");
- }
+ DD.Kind = Availability;
DD.Triggered = false;
DD.Loc = Loc;
- DD.DeprecationData.Decl = D;
- DD.DeprecationData.UnknownObjCClass = UnknownObjCClass;
- DD.DeprecationData.ObjCProperty = ObjCProperty;
+ DD.AvailabilityData.Decl = D;
+ DD.AvailabilityData.UnknownObjCClass = UnknownObjCClass;
+ DD.AvailabilityData.ObjCProperty = ObjCProperty;
char *MessageData = nullptr;
if (Msg.size()) {
MessageData = new char [Msg.size()];
memcpy(MessageData, Msg.data(), Msg.size());
}
- DD.DeprecationData.Message = MessageData;
- DD.DeprecationData.MessageLen = Msg.size();
- DD.DeprecationData.ObjCPropertyAccess = ObjCPropertyAccess;
+ DD.AvailabilityData.Message = MessageData;
+ DD.AvailabilityData.MessageLen = Msg.size();
+ DD.AvailabilityData.AR = AR;
+ DD.AvailabilityData.ObjCPropertyAccess = ObjCPropertyAccess;
return DD;
}
void DelayedDiagnostic::Destroy() {
- switch (static_cast<DDKind>(Kind)) {
+ switch (Kind) {
case Access:
getAccessData().~AccessedEntity();
break;
- case Deprecation:
- case Unavailable:
- delete [] DeprecationData.Message;
+ case Availability:
+ delete[] AvailabilityData.Message;
break;
case ForbiddenType:
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index bdbe06c4969d..899d3fa83cc3 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -325,30 +325,27 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
case Stmt::IfStmtClass: {
IfStmt *IS = cast<IfStmt>(S);
- if (!IS->isConstexpr())
+ if (!(IS->isConstexpr() || IS->isObjCAvailabilityCheck()))
break;
+ unsigned Diag = IS->isConstexpr() ? diag::note_protected_by_constexpr_if
+ : diag::note_protected_by_if_available;
+
if (VarDecl *Var = IS->getConditionVariable())
BuildScopeInformation(Var, ParentScope);
// Cannot jump into the middle of the condition.
unsigned NewParentScope = Scopes.size();
- Scopes.push_back(GotoScope(ParentScope,
- diag::note_protected_by_constexpr_if, 0,
- IS->getLocStart()));
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getLocStart()));
BuildScopeInformation(IS->getCond(), NewParentScope);
// Jumps into either arm of an 'if constexpr' are not allowed.
NewParentScope = Scopes.size();
- Scopes.push_back(GotoScope(ParentScope,
- diag::note_protected_by_constexpr_if, 0,
- IS->getLocStart()));
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getLocStart()));
BuildScopeInformation(IS->getThen(), NewParentScope);
if (Stmt *Else = IS->getElse()) {
NewParentScope = Scopes.size();
- Scopes.push_back(GotoScope(ParentScope,
- diag::note_protected_by_constexpr_if, 0,
- IS->getLocStart()));
+ Scopes.push_back(GotoScope(ParentScope, Diag, 0, IS->getLocStart()));
BuildScopeInformation(Else, NewParentScope);
}
return;
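[Editor's note] A sketch of the jumps this rejects; the @available form gets the analogous note_protected_by_if_available:

    void f() {
      goto later;                        // error: cannot jump into the branch
      if constexpr (sizeof(int) == 4) {
      later:                             // note: protected by 'if constexpr'
        ;
      }
    }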
@@ -553,10 +550,8 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
// order to avoid blowing out the stack.
while (true) {
Stmt *Next;
- if (CaseStmt *CS = dyn_cast<CaseStmt>(SubStmt))
- Next = CS->getSubStmt();
- else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SubStmt))
- Next = DS->getSubStmt();
+ if (SwitchCase *SC = dyn_cast<SwitchCase>(SubStmt))
+ Next = SC->getSubStmt();
else if (LabelStmt *LS = dyn_cast<LabelStmt>(SubStmt))
Next = LS->getSubStmt();
else
diff --git a/lib/Sema/MultiplexExternalSemaSource.cpp b/lib/Sema/MultiplexExternalSemaSource.cpp
index eee4c00324ba..077a56ff8e7f 100644
--- a/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -285,7 +285,8 @@ void MultiplexExternalSemaSource::ReadPendingInstantiations(
}
void MultiplexExternalSemaSource::ReadLateParsedTemplates(
- llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> &LPTMap) {
+ llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>>
+ &LPTMap) {
for (size_t i = 0; i < Sources.size(); ++i)
Sources[i]->ReadLateParsedTemplates(LPTMap);
}
diff --git a/lib/Sema/ScopeInfo.cpp b/lib/Sema/ScopeInfo.cpp
index 4b2e13e20deb..3970b4136982 100644
--- a/lib/Sema/ScopeInfo.cpp
+++ b/lib/Sema/ScopeInfo.cpp
@@ -29,6 +29,8 @@ void FunctionScopeInfo::Clear() {
HasIndirectGoto = false;
HasDroppedStmt = false;
HasOMPDeclareReductionCombiner = false;
+ HasFallthroughStmt = false;
+ HasPotentialAvailabilityViolations = false;
ObjCShouldCallSuper = false;
ObjCIsDesignatedInit = false;
ObjCWarnForNoDesignatedInitChain = false;
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 777747606304..412f944f89c0 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/DeclCXX.h"
@@ -22,7 +21,6 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/DiagnosticOptions.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
@@ -30,14 +28,15 @@
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Initialization.h"
#include "clang/Sema/MultiplexExternalSemaSource.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaConsumer.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/TemplateDeduction.h"
-#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
using namespace clang;
@@ -89,15 +88,14 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
VisContext(nullptr),
IsBuildingRecoveryCallExpr(false),
Cleanup{}, LateTemplateParser(nullptr),
- LateTemplateParserCleanup(nullptr),
- OpaqueParser(nullptr), IdResolver(pp), StdInitializerList(nullptr),
+ LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
+ StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
CXXTypeInfoDecl(nullptr), MSVCGuidDecl(nullptr),
NSNumberDecl(nullptr), NSValueDecl(nullptr),
NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
ValueWithBytesObjCTypeMethod(nullptr),
NSArrayDecl(nullptr), ArrayWithObjectsMethod(nullptr),
NSDictionaryDecl(nullptr), DictionaryWithObjectsMethod(nullptr),
- MSAsmLabelNameCounter(0),
GlobalNewDeleteDeclared(false),
TUKind(TUKind),
NumSFINAEErrors(0),
@@ -209,14 +207,11 @@ void Sema::Initialize() {
addImplicitTypedef("size_t", Context.getSizeType());
}
- // Initialize predefined OpenCL types and supported optional core features.
+ // Initialize predefined OpenCL types and supported extensions and (optional)
+ // core features.
if (getLangOpts().OpenCL) {
-#define OPENCLEXT(Ext) \
- if (Context.getTargetInfo().getSupportedOpenCLOpts().is_##Ext##_supported_core( \
- getLangOpts().OpenCLVersion)) \
- getOpenCLOptions().Ext = 1;
-#include "clang/Basic/OpenCLExtensions.def"
-
+ getOpenCLOptions().addSupport(Context.getTargetInfo().getSupportedOpenCLOpts());
+ getOpenCLOptions().enableSupportedCore(getLangOpts().OpenCLVersion);
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
if (getLangOpts().OpenCLVersion >= 200) {
@@ -227,26 +222,60 @@ void Sema::Initialize() {
addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
addImplicitTypedef("atomic_uint",
Context.getAtomicType(Context.UnsignedIntTy));
- addImplicitTypedef("atomic_long", Context.getAtomicType(Context.LongTy));
- addImplicitTypedef("atomic_ulong",
- Context.getAtomicType(Context.UnsignedLongTy));
+ auto AtomicLongT = Context.getAtomicType(Context.LongTy);
+ addImplicitTypedef("atomic_long", AtomicLongT);
+ auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
+ addImplicitTypedef("atomic_ulong", AtomicULongT);
addImplicitTypedef("atomic_float",
Context.getAtomicType(Context.FloatTy));
- addImplicitTypedef("atomic_double",
- Context.getAtomicType(Context.DoubleTy));
+ auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
+ addImplicitTypedef("atomic_double", AtomicDoubleT);
// OpenCL v2.0, s6.13.11.6 requires that atomic_flag is implemented as
// 32-bit integer and OpenCL v2.0, s6.1.1 int is always 32-bit wide.
addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
- addImplicitTypedef("atomic_intptr_t",
- Context.getAtomicType(Context.getIntPtrType()));
- addImplicitTypedef("atomic_uintptr_t",
- Context.getAtomicType(Context.getUIntPtrType()));
- addImplicitTypedef("atomic_size_t",
- Context.getAtomicType(Context.getSizeType()));
- addImplicitTypedef("atomic_ptrdiff_t",
- Context.getAtomicType(Context.getPointerDiffType()));
+ auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
+ addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
+ auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
+ addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
+ auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
+ addImplicitTypedef("atomic_size_t", AtomicSizeT);
+ auto AtomicPtrDiffT = Context.getAtomicType(Context.getPointerDiffType());
+ addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
+
+ // OpenCL v2.0 s6.13.11.6:
+ // - The atomic_long and atomic_ulong types are supported if the
+ // cl_khr_int64_base_atomics and cl_khr_int64_extended_atomics
+ // extensions are supported.
+ // - The atomic_double type is only supported if double precision
+ // is supported and the cl_khr_int64_base_atomics and
+ // cl_khr_int64_extended_atomics extensions are supported.
+ // - If the device address space is 64-bits, the data types
+ // atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
+ // atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
+ // cl_khr_int64_extended_atomics extensions are supported.
+ std::vector<QualType> Atomic64BitTypes;
+ Atomic64BitTypes.push_back(AtomicLongT);
+ Atomic64BitTypes.push_back(AtomicULongT);
+ Atomic64BitTypes.push_back(AtomicDoubleT);
+ if (Context.getTypeSize(AtomicSizeT) == 64) {
+ Atomic64BitTypes.push_back(AtomicSizeT);
+ Atomic64BitTypes.push_back(AtomicIntPtrT);
+ Atomic64BitTypes.push_back(AtomicUIntPtrT);
+ Atomic64BitTypes.push_back(AtomicPtrDiffT);
+ }
+ for (auto &I : Atomic64BitTypes)
+ setOpenCLExtensionForType(I,
+ "cl_khr_int64_base_atomics cl_khr_int64_extended_atomics");
+
+ setOpenCLExtensionForType(AtomicDoubleT, "cl_khr_fp64");
}
- }
+
+ setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");
+
+#define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext) \
+ setOpenCLExtensionForType(Context.Id, Ext);
+#include "clang/Basic/OpenCLImageTypes.def"
+ };
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
@@ -260,7 +289,6 @@ void Sema::Initialize() {
}
Sema::~Sema() {
- llvm::DeleteContainerSeconds(LateParsedTemplateMap);
if (VisContext) FreeVisContext();
// Kill all the active scopes.
for (unsigned I = 1, E = FunctionScopes.size(); I != E; ++I)
@@ -393,6 +421,18 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
if (ExprTy == TypeTy)
return E;
+ // C++1z [conv.array]: The temporary materialization conversion is applied.
+ // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
+ if (Kind == CK_ArrayToPointerDecay && getLangOpts().CPlusPlus &&
+ E->getValueKind() == VK_RValue) {
+ // The temporary is an lvalue in C++98 and an xvalue otherwise.
+ ExprResult Materialized = CreateMaterializeTemporaryExpr(
+ E->getType(), E, !getLangOpts().CPlusPlus11);
+ if (Materialized.isInvalid())
+ return ExprError();
+ E = Materialized.get();
+ }
+
if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
ImpCast->setType(Ty);
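A minimal C++ sketch (not part of the patch) of what the materialization enables under DR1213:

    struct S { int a[3]; };

    int main() {
      // S{}.a is an array prvalue; the decay above now goes through a
      // MaterializeTemporaryExpr, so subscripting yields an xvalue that
      // an rvalue reference can bind to (with lifetime extension).
      int &&r = S{}.a[0];
      (void)r;
    }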
@@ -710,7 +750,8 @@ void Sema::ActOnEndOfTranslationUnit() {
if (TUKind == TU_Prefix) {
// Translation unit prefixes don't need any of the checking below.
- TUScope = nullptr;
+ if (!PP.isIncrementalProcessingEnabled())
+ TUScope = nullptr;
return;
}
@@ -811,6 +852,7 @@ void Sema::ActOnEndOfTranslationUnit() {
diag::err_tentative_def_incomplete_type))
VD->setInvalidDecl();
+ // No initialization is performed for a tentative definition.
CheckCompleteVariableDeclaration(VD);
// Notify the consumer that we've completed a tentative definition.
@@ -865,8 +907,11 @@ void Sema::ActOnEndOfTranslationUnit() {
Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
<< /*variable*/1 << DiagD->getDeclName();
} else if (DiagD->getType().isConstQualified()) {
- Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
- << DiagD->getDeclName();
+ const SourceManager &SM = SourceMgr;
+ if (SM.getMainFileID() != SM.getFileID(DiagD->getLocation()) ||
+ !PP.getLangOpts().IsHeaderFile)
+ Diag(DiagD->getLocation(), diag::warn_unused_const_variable)
+ << DiagD->getDeclName();
} else {
Diag(DiagD->getLocation(), diag::warn_unused_variable)
<< DiagD->getDeclName();
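A small illustration (hypothetical file, not from the patch): when the header itself is the main file, e.g. built with -x c++-header, the warning is now suppressed because a later consumer of the header may still use the variable.

    // foo.h, compiled as: clang -x c++-header -Wunused-const-variable foo.h
    const int BufferSize = 4096; // previously warned; now silent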
@@ -909,7 +954,8 @@ void Sema::ActOnEndOfTranslationUnit() {
assert(ParsingInitForAutoVars.empty() &&
"Didn't unmark var as having its initializer parsed");
- TUScope = nullptr;
+ if (!PP.isIncrementalProcessingEnabled())
+ TUScope = nullptr;
}
@@ -1527,3 +1573,85 @@ const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
return DeleteExprs;
}
+
+void Sema::setOpenCLExtensionForType(QualType T, llvm::StringRef ExtStr) {
+ if (ExtStr.empty())
+ return;
+ llvm::SmallVector<StringRef, 1> Exts;
+ ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false);
+ auto CanT = T.getCanonicalType().getTypePtr();
+ for (auto &I : Exts)
+ OpenCLTypeExtMap[CanT].insert(I.str());
+}
+
+void Sema::setOpenCLExtensionForDecl(Decl *FD, StringRef ExtStr) {
+ llvm::SmallVector<StringRef, 1> Exts;
+ ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false);
+ if (Exts.empty())
+ return;
+ for (auto &I : Exts)
+ OpenCLDeclExtMap[FD].insert(I.str());
+}
+
+void Sema::setCurrentOpenCLExtensionForType(QualType T) {
+ if (CurrOpenCLExtension.empty())
+ return;
+ setOpenCLExtensionForType(T, CurrOpenCLExtension);
+}
+
+void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) {
+ if (CurrOpenCLExtension.empty())
+ return;
+ setOpenCLExtensionForDecl(D, CurrOpenCLExtension);
+}
+
+bool Sema::isOpenCLDisabledDecl(Decl *FD) {
+ auto Loc = OpenCLDeclExtMap.find(FD);
+ if (Loc == OpenCLDeclExtMap.end())
+ return false;
+ for (auto &I : Loc->second) {
+ if (!getOpenCLOptions().isEnabled(I))
+ return true;
+ }
+ return false;
+}
+
+template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
+bool Sema::checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc,
+ DiagInfoT DiagInfo, MapT &Map,
+ unsigned Selector,
+ SourceRange SrcRange) {
+ auto Loc = Map.find(D);
+ if (Loc == Map.end())
+ return false;
+ bool Disabled = false;
+ for (auto &I : Loc->second) {
+ if (I != CurrOpenCLExtension && !getOpenCLOptions().isEnabled(I)) {
+ Diag(DiagLoc, diag::err_opencl_requires_extension) << Selector << DiagInfo
+ << I << SrcRange;
+ Disabled = true;
+ }
+ }
+ return Disabled;
+}
+
+bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) {
+ // Check extensions for declared types.
+ Decl *Decl = nullptr;
+ if (auto TypedefT = dyn_cast<TypedefType>(QT.getTypePtr()))
+ Decl = TypedefT->getDecl();
+ if (auto TagT = dyn_cast<TagType>(QT.getCanonicalType().getTypePtr()))
+ Decl = TagT->getDecl();
+ auto Loc = DS.getTypeSpecTypeLoc();
+ if (checkOpenCLDisabledTypeOrDecl(Decl, Loc, QT, OpenCLDeclExtMap))
+ return true;
+
+ // Check extensions for builtin types.
+ return checkOpenCLDisabledTypeOrDecl(QT.getCanonicalType().getTypePtr(), Loc,
+ QT, OpenCLTypeExtMap);
+}
+
+bool Sema::checkOpenCLDisabledDecl(const Decl &D, const Expr &E) {
+ return checkOpenCLDisabledTypeOrDecl(&D, E.getLocStart(), "",
+ OpenCLDeclExtMap, 1, D.getSourceRange());
+}
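A self-contained C++ sketch of the gating scheme these helpers implement (std containers and string keys standing in for the Sema maps and QualTypes): a type is usable only when every extension recorded for it is enabled.

    #include <iostream>
    #include <map>
    #include <set>
    #include <string>

    int main() {
      // Mirrors OpenCLTypeExtMap: type -> extensions that must be enabled.
      std::map<std::string, std::set<std::string>> TypeExtMap = {
          {"double", {"cl_khr_fp64"}},
          {"atomic_long",
           {"cl_khr_int64_base_atomics", "cl_khr_int64_extended_atomics"}}};
      std::set<std::string> Enabled = {"cl_khr_fp64"};

      for (const auto &Entry : TypeExtMap)
        for (const auto &Ext : Entry.second)
          if (!Enabled.count(Ext)) // cf. err_opencl_requires_extension
            std::cout << "use of '" << Entry.first
                      << "' requires extension '" << Ext << "'\n";
    }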
diff --git a/lib/Sema/SemaAttr.cpp b/lib/Sema/SemaAttr.cpp
index 0d7fba5c6709..bad9e7024267 100644
--- a/lib/Sema/SemaAttr.cpp
+++ b/lib/Sema/SemaAttr.cpp
@@ -12,13 +12,13 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
using namespace clang;
//===----------------------------------------------------------------------===//
diff --git a/lib/Sema/SemaCUDA.cpp b/lib/Sema/SemaCUDA.cpp
index 90af6d5a927f..6f272ec839f5 100644
--- a/lib/Sema/SemaCUDA.cpp
+++ b/lib/Sema/SemaCUDA.cpp
@@ -18,11 +18,25 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
using namespace clang;
+void Sema::PushForceCUDAHostDevice() {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ ForceCUDAHostDeviceDepth++;
+}
+
+bool Sema::PopForceCUDAHostDevice() {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ if (ForceCUDAHostDeviceDepth == 0)
+ return false;
+ ForceCUDAHostDeviceDepth--;
+ return true;
+}
+
ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc) {
@@ -40,21 +54,73 @@ ExprResult Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
/*IsExecConfig=*/true);
}
+Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const AttributeList *Attr) {
+ bool HasHostAttr = false;
+ bool HasDeviceAttr = false;
+ bool HasGlobalAttr = false;
+ bool HasInvalidTargetAttr = false;
+ while (Attr) {
+ switch (Attr->getKind()) {
+ case AttributeList::AT_CUDAGlobal:
+ HasGlobalAttr = true;
+ break;
+ case AttributeList::AT_CUDAHost:
+ HasHostAttr = true;
+ break;
+ case AttributeList::AT_CUDADevice:
+ HasDeviceAttr = true;
+ break;
+ case AttributeList::AT_CUDAInvalidTarget:
+ HasInvalidTargetAttr = true;
+ break;
+ default:
+ break;
+ }
+ Attr = Attr->getNext();
+ }
+ if (HasInvalidTargetAttr)
+ return CFT_InvalidTarget;
+
+ if (HasGlobalAttr)
+ return CFT_Global;
+
+ if (HasHostAttr && HasDeviceAttr)
+ return CFT_HostDevice;
+
+ if (HasDeviceAttr)
+ return CFT_Device;
+
+ return CFT_Host;
+}
+
+template <typename A>
+static bool hasAttr(const FunctionDecl *D, bool IgnoreImplicitAttr) {
+ return D->hasAttrs() && llvm::any_of(D->getAttrs(), [&](Attr *Attribute) {
+ return isa<A>(Attribute) &&
+ !(IgnoreImplicitAttr && Attribute->isImplicit());
+ });
+}
+
/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
-Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
+Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
+ bool IgnoreImplicitHDAttr) {
+ // Code that lives outside a function is run on the host.
+ if (D == nullptr)
+ return CFT_Host;
+
if (D->hasAttr<CUDAInvalidTargetAttr>())
return CFT_InvalidTarget;
if (D->hasAttr<CUDAGlobalAttr>())
return CFT_Global;
- if (D->hasAttr<CUDADeviceAttr>()) {
- if (D->hasAttr<CUDAHostAttr>())
+ if (hasAttr<CUDADeviceAttr>(D, IgnoreImplicitHDAttr)) {
+ if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr))
return CFT_HostDevice;
return CFT_Device;
- } else if (D->hasAttr<CUDAHostAttr>()) {
+ } else if (hasAttr<CUDAHostAttr>(D, IgnoreImplicitHDAttr)) {
return CFT_Host;
- } else if (D->isImplicit()) {
+ } else if (D->isImplicit() && !IgnoreImplicitHDAttr) {
// Some implicit declarations (like intrinsic functions) are not marked.
// Set the most lenient target on them for maximal flexibility.
return CFT_HostDevice;
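For reference, a short CUDA C++ sketch (illustrative names only) of how source attributes map to the targets returned above:

    __global__ void kernel();          // CFT_Global
    __device__ int on_device();        // CFT_Device
    __host__ int on_host();            // CFT_Host
    __host__ __device__ int on_both(); // CFT_HostDevice
    int unattributed();                // CFT_Host (no CUDA attributes)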
@@ -95,9 +161,8 @@ Sema::CUDAFunctionPreference
Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
assert(Callee && "Callee must be valid.");
+ CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller);
CUDAFunctionTarget CalleeTarget = IdentifyCUDATarget(Callee);
- CUDAFunctionTarget CallerTarget =
- (Caller != nullptr) ? IdentifyCUDATarget(Caller) : Sema::CFT_Host;
// If one of the targets is invalid, the check always fails, no matter what
// the other target is.
@@ -107,8 +172,7 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
// (a) Can't call global from some contexts until we support CUDA's
// dynamic parallelism.
if (CalleeTarget == CFT_Global &&
- (CallerTarget == CFT_Global || CallerTarget == CFT_Device ||
- (CallerTarget == CFT_HostDevice && getLangOpts().CUDAIsDevice)))
+ (CallerTarget == CFT_Global || CallerTarget == CFT_Device))
return CFP_Never;
// (b) Calling HostDevice is OK for everyone.
@@ -145,54 +209,31 @@ Sema::IdentifyCUDAPreference(const FunctionDecl *Caller,
llvm_unreachable("All cases should've been handled by now.");
}
-template <typename T>
-static void EraseUnwantedCUDAMatchesImpl(
- Sema &S, const FunctionDecl *Caller, llvm::SmallVectorImpl<T> &Matches,
- std::function<const FunctionDecl *(const T &)> FetchDecl) {
+void Sema::EraseUnwantedCUDAMatches(
+ const FunctionDecl *Caller,
+ SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches) {
if (Matches.size() <= 1)
return;
+ using Pair = std::pair<DeclAccessPair, FunctionDecl*>;
+
// Gets the CUDA function preference for a call from Caller to Match.
- auto GetCFP = [&](const T &Match) {
- return S.IdentifyCUDAPreference(Caller, FetchDecl(Match));
+ auto GetCFP = [&](const Pair &Match) {
+ return IdentifyCUDAPreference(Caller, Match.second);
};
// Find the best call preference among the functions in Matches.
- Sema::CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
+ CUDAFunctionPreference BestCFP = GetCFP(*std::max_element(
Matches.begin(), Matches.end(),
- [&](const T &M1, const T &M2) { return GetCFP(M1) < GetCFP(M2); }));
+ [&](const Pair &M1, const Pair &M2) { return GetCFP(M1) < GetCFP(M2); }));
// Erase all functions with lower priority.
Matches.erase(
- llvm::remove_if(Matches,
- [&](const T &Match) { return GetCFP(Match) < BestCFP; }),
+ llvm::remove_if(
+ Matches, [&](const Pair &Match) { return GetCFP(Match) < BestCFP; }),
Matches.end());
}
-void Sema::EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
- SmallVectorImpl<FunctionDecl *> &Matches){
- EraseUnwantedCUDAMatchesImpl<FunctionDecl *>(
- *this, Caller, Matches, [](const FunctionDecl *item) { return item; });
-}
-
-void Sema::EraseUnwantedCUDAMatches(const FunctionDecl *Caller,
- SmallVectorImpl<DeclAccessPair> &Matches) {
- EraseUnwantedCUDAMatchesImpl<DeclAccessPair>(
- *this, Caller, Matches, [](const DeclAccessPair &item) {
- return dyn_cast<FunctionDecl>(item.getDecl());
- });
-}
-
-void Sema::EraseUnwantedCUDAMatches(
- const FunctionDecl *Caller,
- SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches){
- EraseUnwantedCUDAMatchesImpl<std::pair<DeclAccessPair, FunctionDecl *>>(
- *this, Caller, Matches,
- [](const std::pair<DeclAccessPair, FunctionDecl *> &item) {
- return dyn_cast<FunctionDecl>(item.second);
- });
-}
-
/// When an implicitly-declared special member has to invoke more than one
/// base/field special member, conflicts may occur in the targets of these
/// members. For example, if one base's member is __host__ and another's is
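The pruning in EraseUnwantedCUDAMatches above is the usual max_element plus erase-remove idiom; a self-contained C++ sketch with integers standing in for CUDAFunctionPreference ranks:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> Prefs = {1, 3, 2, 3}; // stand-ins for CFP_* values
      int Best = *std::max_element(Prefs.begin(), Prefs.end());
      Prefs.erase(std::remove_if(Prefs.begin(), Prefs.end(),
                                 [&](int P) { return P < Best; }),
                  Prefs.end());
      assert(Prefs == (std::vector<int>{3, 3})); // only best-ranked remain
    }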
@@ -441,9 +482,23 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
// * a __device__ function with this signature was already declared, in which
// case in which case we output an error, unless the __device__ decl is in a
// system header, in which case we leave the constexpr function unattributed.
-void Sema::maybeAddCUDAHostDeviceAttrs(Scope *S, FunctionDecl *NewD,
+//
+// In addition, all function decls are treated as __host__ __device__ when
+// ForceCUDAHostDeviceDepth > 0 (corresponding to code within a
+// #pragma clang force_cuda_host_device_begin/end
+// pair).
+void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
const LookupResult &Previous) {
- assert(getLangOpts().CUDA && "May be called only for CUDA compilations.");
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+
+ if (ForceCUDAHostDeviceDepth > 0) {
+ if (!NewD->hasAttr<CUDAHostAttr>())
+ NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ if (!NewD->hasAttr<CUDADeviceAttr>())
+ NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ return;
+ }
+
if (!getLangOpts().CUDAHostDeviceConstexpr || !NewD->isConstexpr() ||
NewD->isVariadic() || NewD->hasAttr<CUDAHostAttr>() ||
NewD->hasAttr<CUDADeviceAttr>() || NewD->hasAttr<CUDAGlobalAttr>())
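A brief CUDA C++ illustration of the pragma behavior described in the comment above (pragma spelling assumed from the force_cuda_host_device support this patch builds on):

    #pragma clang force_cuda_host_device begin
    // ForceCUDAHostDeviceDepth > 0: declared as if __host__ __device__.
    int add(int a, int b) { return a + b; }
    #pragma clang force_cuda_host_device end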
@@ -480,3 +535,378 @@ void Sema::maybeAddCUDAHostDeviceAttrs(Scope *S, FunctionDecl *NewD,
NewD->addAttr(CUDAHostAttr::CreateImplicit(Context));
NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
+
+// In CUDA, there are some constructs which may appear in semantically-valid
+// code, but trigger errors if we ever generate code for the function in which
+// they appear. Essentially every construct you're not allowed to use on the
+// device falls into this category, because you are allowed to use these
+// constructs in a __host__ __device__ function, but only if that function is
+// never codegen'ed on the device.
+//
+// To handle semantic checking for these constructs, we keep track of the set of
+// functions we know will be emitted, either because we could tell a priori that
+// they would be emitted, or because they were transitively called by a
+// known-emitted function.
+//
+// We also keep a partial call graph of which not-known-emitted functions call
+// which other not-known-emitted functions.
+//
+// When we see something which is illegal if the current function is emitted
+// (usually by way of CUDADiagIfDeviceCode, CUDADiagIfHostCode, or
+// CheckCUDACall), we first check if the current function is known-emitted. If
+// so, we immediately output the diagnostic.
+//
+// Otherwise, we "defer" the diagnostic. It sits in Sema::CUDADeferredDiags
+// until we discover that the function is known-emitted, at which point we take
+// it out of this map and emit the diagnostic.
+
+Sema::CUDADiagBuilder::CUDADiagBuilder(Kind K, SourceLocation Loc,
+ unsigned DiagID, FunctionDecl *Fn,
+ Sema &S)
+ : S(S), Loc(Loc), DiagID(DiagID), Fn(Fn),
+ ShowCallStack(K == K_ImmediateWithCallStack || K == K_Deferred) {
+ switch (K) {
+ case K_Nop:
+ break;
+ case K_Immediate:
+ case K_ImmediateWithCallStack:
+ ImmediateDiag.emplace(S.Diag(Loc, DiagID));
+ break;
+ case K_Deferred:
+ assert(Fn && "Must have a function to attach the deferred diag to.");
+ PartialDiag.emplace(S.PDiag(DiagID));
+ break;
+ }
+}
+
+// Print notes showing how we can reach FD starting from an a priori
+// known-callable function.
+static void EmitCallStackNotes(Sema &S, FunctionDecl *FD) {
+ auto FnIt = S.CUDAKnownEmittedFns.find(FD);
+ while (FnIt != S.CUDAKnownEmittedFns.end()) {
+ DiagnosticBuilder Builder(
+ S.Diags.Report(FnIt->second.Loc, diag::note_called_by));
+ Builder << FnIt->second.FD;
+ Builder.setForceEmit();
+
+ FnIt = S.CUDAKnownEmittedFns.find(FnIt->second.FD);
+ }
+}
+
+Sema::CUDADiagBuilder::~CUDADiagBuilder() {
+ if (ImmediateDiag) {
+ // Emit our diagnostic and, if it was a warning or error, output a callstack
+ // if Fn isn't a priori known-emitted.
+ bool IsWarningOrError = S.getDiagnostics().getDiagnosticLevel(
+ DiagID, Loc) >= DiagnosticsEngine::Warning;
+ ImmediateDiag.reset(); // Emit the immediate diag.
+ if (IsWarningOrError && ShowCallStack)
+ EmitCallStackNotes(S, Fn);
+ } else if (PartialDiag) {
+ assert(ShowCallStack && "Must always show call stack for deferred diags.");
+ S.CUDADeferredDiags[Fn].push_back({Loc, std::move(*PartialDiag)});
+ }
+}
+
+// Do we know that we will eventually codegen the given function?
+static bool IsKnownEmitted(Sema &S, FunctionDecl *FD) {
+ // Templates are emitted when they're instantiated.
+ if (FD->isDependentContext())
+ return false;
+
+ // When compiling for device, host functions are never emitted. Similarly,
+ // when compiling for host, device and global functions are never emitted.
+ // (Technically, we do emit a host-side stub for global functions, but this
+ // doesn't count for our purposes here.)
+ Sema::CUDAFunctionTarget T = S.IdentifyCUDATarget(FD);
+ if (S.getLangOpts().CUDAIsDevice && T == Sema::CFT_Host)
+ return false;
+ if (!S.getLangOpts().CUDAIsDevice &&
+ (T == Sema::CFT_Device || T == Sema::CFT_Global))
+ return false;
+
+ // Check whether this function is externally visible -- if so, it's
+ // known-emitted.
+ //
+ // We have to check the GVA linkage of the function's *definition* -- if we
+ // only have a declaration, we don't know whether or not the function will be
+ // emitted, because (say) the definition could include "inline".
+ FunctionDecl *Def = FD->getDefinition();
+
+ // We may currently be parsing the body of FD, in which case
+ // FD->getDefinition() will be null, but we still want to treat FD as though
+ // it's a definition.
+ if (!Def && FD->willHaveBody())
+ Def = FD;
+
+ if (Def &&
+ !isDiscardableGVALinkage(S.getASTContext().GetGVALinkageForFunction(Def)))
+ return true;
+
+ // Otherwise, the function is known-emitted if it's in our set of
+ // known-emitted functions.
+ return S.CUDAKnownEmittedFns.count(FD) > 0;
+}
+
+Sema::CUDADiagBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
+ unsigned DiagID) {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ CUDADiagBuilder::Kind DiagKind = [&] {
+ switch (CurrentCUDATarget()) {
+ case CFT_Global:
+ case CFT_Device:
+ return CUDADiagBuilder::K_Immediate;
+ case CFT_HostDevice:
+ // An HD function counts as host code if we're compiling for host, and
+ // device code if we're compiling for device. Defer any errors in device
+ // mode until the function is known-emitted.
+ if (getLangOpts().CUDAIsDevice) {
+ return IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
+ ? CUDADiagBuilder::K_ImmediateWithCallStack
+ : CUDADiagBuilder::K_Deferred;
+ }
+ return CUDADiagBuilder::K_Nop;
+
+ default:
+ return CUDADiagBuilder::K_Nop;
+ }
+ }();
+ return CUDADiagBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
+}
+
+Sema::CUDADiagBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
+ unsigned DiagID) {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ CUDADiagBuilder::Kind DiagKind = [&] {
+ switch (CurrentCUDATarget()) {
+ case CFT_Host:
+ return CUDADiagBuilder::K_Immediate;
+ case CFT_HostDevice:
+ // An HD function counts as host code if we're compiling for host, and
+ // device code if we're compiling for device. Defer any errors in device
+ // mode until the function is known-emitted.
+ if (getLangOpts().CUDAIsDevice)
+ return CUDADiagBuilder::K_Nop;
+
+ return IsKnownEmitted(*this, dyn_cast<FunctionDecl>(CurContext))
+ ? CUDADiagBuilder::K_ImmediateWithCallStack
+ : CUDADiagBuilder::K_Deferred;
+ default:
+ return CUDADiagBuilder::K_Nop;
+ }
+ }();
+ return CUDADiagBuilder(DiagKind, Loc, DiagID,
+ dyn_cast<FunctionDecl>(CurContext), *this);
+}
+
+// Emit any deferred diagnostics for FD and erase them from the map in which
+// they're stored.
+static void EmitDeferredDiags(Sema &S, FunctionDecl *FD) {
+ auto It = S.CUDADeferredDiags.find(FD);
+ if (It == S.CUDADeferredDiags.end())
+ return;
+ bool HasWarningOrError = false;
+ for (PartialDiagnosticAt &PDAt : It->second) {
+ const SourceLocation &Loc = PDAt.first;
+ const PartialDiagnostic &PD = PDAt.second;
+ HasWarningOrError |= S.getDiagnostics().getDiagnosticLevel(
+ PD.getDiagID(), Loc) >= DiagnosticsEngine::Warning;
+ DiagnosticBuilder Builder(S.Diags.Report(Loc, PD.getDiagID()));
+ Builder.setForceEmit();
+ PD.Emit(Builder);
+ }
+ S.CUDADeferredDiags.erase(It);
+
+ // FIXME: Should this be called after every warning/error emitted in the loop
+ // above, instead of just once per function? That would be consistent with
+ // how we handle immediate errors, but it also seems like a bit much.
+ if (HasWarningOrError)
+ EmitCallStackNotes(S, FD);
+}
+
+// Indicate that this function (and thus everything it transitively calls) will
+// be codegen'ed, and emit any deferred diagnostics on this function and its
+// (transitive) callees.
+static void MarkKnownEmitted(Sema &S, FunctionDecl *OrigCaller,
+ FunctionDecl *OrigCallee, SourceLocation OrigLoc) {
+ // Nothing to do if we already know that FD is emitted.
+ if (IsKnownEmitted(S, OrigCallee)) {
+ assert(!S.CUDACallGraph.count(OrigCallee));
+ return;
+ }
+
+ // We've just discovered that OrigCallee is known-emitted. Walk our call
+ // graph to see what else we can now discover also must be emitted.
+
+ struct CallInfo {
+ FunctionDecl *Caller;
+ FunctionDecl *Callee;
+ SourceLocation Loc;
+ };
+ llvm::SmallVector<CallInfo, 4> Worklist = {{OrigCaller, OrigCallee, OrigLoc}};
+ llvm::SmallSet<CanonicalDeclPtr<FunctionDecl>, 4> Seen;
+ Seen.insert(OrigCallee);
+ while (!Worklist.empty()) {
+ CallInfo C = Worklist.pop_back_val();
+ assert(!IsKnownEmitted(S, C.Callee) &&
+ "Worklist should not contain known-emitted functions.");
+ S.CUDAKnownEmittedFns[C.Callee] = {C.Caller, C.Loc};
+ EmitDeferredDiags(S, C.Callee);
+
+ // If this is a template instantiation, explore its callgraph as well:
+ // Non-dependent calls are part of the template's callgraph, while dependent
+ // calls are part of the instantiation's call graph.
+ if (auto *Templ = C.Callee->getPrimaryTemplate()) {
+ FunctionDecl *TemplFD = Templ->getAsFunction();
+ if (!Seen.count(TemplFD) && !S.CUDAKnownEmittedFns.count(TemplFD)) {
+ Seen.insert(TemplFD);
+ Worklist.push_back(
+ {/* Caller = */ C.Caller, /* Callee = */ TemplFD, C.Loc});
+ }
+ }
+
+ // Add all functions called by Callee to our worklist.
+ auto CGIt = S.CUDACallGraph.find(C.Callee);
+ if (CGIt == S.CUDACallGraph.end())
+ continue;
+
+ for (std::pair<CanonicalDeclPtr<FunctionDecl>, SourceLocation> FDLoc :
+ CGIt->second) {
+ FunctionDecl *NewCallee = FDLoc.first;
+ SourceLocation CallLoc = FDLoc.second;
+ if (Seen.count(NewCallee) || IsKnownEmitted(S, NewCallee))
+ continue;
+ Seen.insert(NewCallee);
+ Worklist.push_back(
+ {/* Caller = */ C.Callee, /* Callee = */ NewCallee, CallLoc});
+ }
+
+ // C.Callee is now known-emitted, so we no longer need to maintain its list
+ // of callees in CUDACallGraph.
+ S.CUDACallGraph.erase(CGIt);
+ }
+}
+
+bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ assert(Callee && "Callee may not be null.");
+ // FIXME: Is bailing out early correct here? Should we instead assume that
+ // the caller is a global initializer?
+ FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ if (!Caller)
+ return true;
+
+ // If the caller is known-emitted, mark the callee as known-emitted.
+ // Otherwise, mark the call in our call graph so we can traverse it later.
+ bool CallerKnownEmitted = IsKnownEmitted(*this, Caller);
+ if (CallerKnownEmitted)
+ MarkKnownEmitted(*this, Caller, Callee, Loc);
+ else {
+ // If we have
+ // host fn calls kernel fn calls host+device,
+ // the HD function does not get instantiated on the host. We model this by
+ // omitting the call to the kernel from the callgraph. This ensures
+ // that, when compiling for host, only HD functions actually called from the
+ // host get marked as known-emitted.
+ if (getLangOpts().CUDAIsDevice || IdentifyCUDATarget(Callee) != CFT_Global)
+ CUDACallGraph[Caller].insert({Callee, Loc});
+ }
+
+ CUDADiagBuilder::Kind DiagKind = [&] {
+ switch (IdentifyCUDAPreference(Caller, Callee)) {
+ case CFP_Never:
+ return CUDADiagBuilder::K_Immediate;
+ case CFP_WrongSide:
+ assert(Caller && "WrongSide calls require a non-null caller");
+ // If we know the caller will be emitted, we know this wrong-side call
+ // will be emitted, so it's an immediate error. Otherwise, defer the
+ // error until we know the caller is emitted.
+ return CallerKnownEmitted ? CUDADiagBuilder::K_ImmediateWithCallStack
+ : CUDADiagBuilder::K_Deferred;
+ default:
+ return CUDADiagBuilder::K_Nop;
+ }
+ }();
+
+ if (DiagKind == CUDADiagBuilder::K_Nop)
+ return true;
+
+ // Avoid emitting this error twice for the same location. Using a hashtable
+ // like this is unfortunate, but because we must continue parsing as normal
+ // after encountering a deferred error, it's otherwise very tricky for us to
+ // ensure that we only emit this deferred error once.
+ if (!LocsWithCUDACallDiags.insert({Caller, Loc}).second)
+ return true;
+
+ CUDADiagBuilder(DiagKind, Loc, diag::err_ref_bad_target, Caller, *this)
+ << IdentifyCUDATarget(Callee) << Callee << IdentifyCUDATarget(Caller);
+ CUDADiagBuilder(DiagKind, Callee->getLocation(), diag::note_previous_decl,
+ Caller, *this)
+ << Callee;
+ return DiagKind != CUDADiagBuilder::K_Immediate &&
+ DiagKind != CUDADiagBuilder::K_ImmediateWithCallStack;
+}
+
+void Sema::CUDASetLambdaAttrs(CXXMethodDecl *Method) {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ if (Method->hasAttr<CUDAHostAttr>() || Method->hasAttr<CUDADeviceAttr>())
+ return;
+ FunctionDecl *CurFn = dyn_cast<FunctionDecl>(CurContext);
+ if (!CurFn)
+ return;
+ CUDAFunctionTarget Target = IdentifyCUDATarget(CurFn);
+ if (Target == CFT_Global || Target == CFT_Device) {
+ Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ } else if (Target == CFT_HostDevice) {
+ Method->addAttr(CUDADeviceAttr::CreateImplicit(Context));
+ Method->addAttr(CUDAHostAttr::CreateImplicit(Context));
+ }
+}
+
+void Sema::checkCUDATargetOverload(FunctionDecl *NewFD,
+ const LookupResult &Previous) {
+ assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ CUDAFunctionTarget NewTarget = IdentifyCUDATarget(NewFD);
+ for (NamedDecl *OldND : Previous) {
+ FunctionDecl *OldFD = OldND->getAsFunction();
+ if (!OldFD)
+ continue;
+
+ CUDAFunctionTarget OldTarget = IdentifyCUDATarget(OldFD);
+ // Don't allow HD and global functions to overload other functions with the
+ // same signature. We allow overloading based on CUDA attributes so that
+ // functions can have different implementations on the host and device, but
+ // HD/global functions "exist" in some sense on both the host and device, so
+ // should have the same implementation on both sides.
+ if (NewTarget != OldTarget &&
+ ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
+ (NewTarget == CFT_Global) || (OldTarget == CFT_Global)) &&
+ !IsOverload(NewFD, OldFD, /* UseMemberUsingDeclRules = */ false,
+ /* ConsiderCudaAttrs = */ false)) {
+ Diag(NewFD->getLocation(), diag::err_cuda_ovl_target)
+ << NewTarget << NewFD->getDeclName() << OldTarget << OldFD;
+ Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ break;
+ }
+ }
+}
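// Illustrative only (not part of the patch): the rule enforced above.
// Host and device may overload each other on the same signature, but an
// HD function may not share a signature with either of them:
//   __host__ int f(int);
//   __device__ int f(int);          // OK: host/device overload pair
//   __host__ __device__ int g(int);
//   __host__ int g(int);            // error: err_cuda_ovl_target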
+
+template <typename AttrTy>
+static void copyAttrIfPresent(Sema &S, FunctionDecl *FD,
+ const FunctionDecl &TemplateFD) {
+ if (AttrTy *Attribute = TemplateFD.getAttr<AttrTy>()) {
+ AttrTy *Clone = Attribute->clone(S.Context);
+ Clone->setInherited(true);
+ FD->addAttr(Clone);
+ }
+}
+
+void Sema::inheritCUDATargetAttrs(FunctionDecl *FD,
+ const FunctionTemplateDecl &TD) {
+ const FunctionDecl &TemplateFD = *TD.getTemplatedDecl();
+ copyAttrIfPresent<CUDAGlobalAttr>(*this, FD, TemplateFD);
+ copyAttrIfPresent<CUDAHostAttr>(*this, FD, TemplateFD);
+ copyAttrIfPresent<CUDADeviceAttr>(*this, FD, TemplateFD);
+}
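Taken together, the deferred-diagnostic machinery added above behaves as in this hedged CUDA C++ sketch (host_only is a hypothetical host-only function; any device-illegal construct would do):

    __host__ int host_only();

    __host__ __device__ int maybe_bad() {
      return host_only(); // wrong-side call: deferred, not an immediate error
    }

    __global__ void kernel() {
      maybe_bad(); // marks maybe_bad() known-emitted for device, so the
                   // deferred diagnostic fires, with a note pointing here
    }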
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 949263d24897..d8971c0d37eb 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
@@ -20,9 +19,9 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/Support/raw_ostream.h"
using namespace clang;
/// \brief Find the current instantiation that associated with the given type.
@@ -381,12 +380,11 @@ NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
}
bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
- SourceLocation IdLoc,
- IdentifierInfo &II,
- ParsedType ObjectTypePtr) {
- QualType ObjectType = GetTypeFromParser(ObjectTypePtr);
- LookupResult Found(*this, &II, IdLoc, LookupNestedNameSpecifierName);
-
+ NestedNameSpecInfo &IdInfo) {
+ QualType ObjectType = GetTypeFromParser(IdInfo.ObjectType);
+ LookupResult Found(*this, IdInfo.Identifier, IdInfo.IdentifierLoc,
+ LookupNestedNameSpecifierName);
+
// Determine where to perform name lookup
DeclContext *LookupCtx = nullptr;
bool isDependent = false;
@@ -449,11 +447,8 @@ class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
/// by ActOnCXXNestedNameSpecifier.
///
/// \param S Scope in which the nested-name-specifier occurs.
-/// \param Identifier Identifier in the sequence "identifier" "::".
-/// \param IdentifierLoc Location of the \p Identifier.
-/// \param CCLoc Location of "::" following Identifier.
-/// \param ObjectType Type of postfix expression if the nested-name-specifier
-/// occurs in construct like: <tt>ptr->nns::f</tt>.
+/// \param IdInfo Parser information about an identifier in the
+/// nested-name-spec.
/// \param EnteringContext If true, enter the context specified by the
/// nested-name-specifier.
/// \param SS Optional nested name specifier preceding the identifier.
@@ -479,17 +474,15 @@ class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
/// dependent context, for example. Nor will it extend \p SS with the scope
/// specifier.
bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
- IdentifierInfo &Identifier,
- SourceLocation IdentifierLoc,
- SourceLocation CCLoc,
- QualType ObjectType,
+ NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon) {
- LookupResult Found(*this, &Identifier, IdentifierLoc,
+ LookupResult Found(*this, IdInfo.Identifier, IdInfo.IdentifierLoc,
LookupNestedNameSpecifierName);
+ QualType ObjectType = GetTypeFromParser(IdInfo.ObjectType);
// Determine where to perform name lookup
DeclContext *LookupCtx = nullptr;
@@ -574,7 +567,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// base object type or prior nested-name-specifier, so this
// nested-name-specifier refers to an unknown specialization. Just build
// a dependent nested-name-specifier.
- SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
+ SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc, IdInfo.CCLoc);
return false;
}
@@ -593,18 +586,19 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// allowed, suggest replacement to ':'.
if (IsCorrectedToColon) {
*IsCorrectedToColon = true;
- Diag(CCLoc, diag::err_nested_name_spec_is_not_class)
- << &Identifier << getLangOpts().CPlusPlus
- << FixItHint::CreateReplacement(CCLoc, ":");
+ Diag(IdInfo.CCLoc, diag::err_nested_name_spec_is_not_class)
+ << IdInfo.Identifier << getLangOpts().CPlusPlus
+ << FixItHint::CreateReplacement(IdInfo.CCLoc, ":");
if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
Diag(ND->getLocation(), diag::note_declared_at);
return true;
}
// Replacement '::' -> ':' is not allowed, just issue respective error.
Diag(R.getNameLoc(), diag::err_expected_class_or_namespace)
- << &Identifier << getLangOpts().CPlusPlus;
+ << IdInfo.Identifier << getLangOpts().CPlusPlus;
if (NamedDecl *ND = R.getAsSingle<NamedDecl>())
- Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier;
+ Diag(ND->getLocation(), diag::note_entity_declared_at)
+ << IdInfo.Identifier;
return true;
}
}
@@ -639,7 +633,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
Found.addDecl(ND);
Found.setLookupName(Corrected.getCorrection());
} else {
- Found.setLookupName(&Identifier);
+ Found.setLookupName(IdInfo.Identifier);
}
}
@@ -649,7 +643,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
bool AcceptSpec = isAcceptableNestedNameSpecifier(SD, &IsExtension);
if (!AcceptSpec && IsExtension) {
AcceptSpec = true;
- Diag(IdentifierLoc, diag::ext_nested_name_spec_is_enum);
+ Diag(IdInfo.IdentifierLoc, diag::ext_nested_name_spec_is_enum);
}
if (AcceptSpec) {
if (!ObjectType.isNull() && !ObjectTypeSearchedInScope &&
@@ -666,7 +660,7 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
// Note that C++11 does *not* perform this redundant lookup.
NamedDecl *OuterDecl;
if (S) {
- LookupResult FoundOuter(*this, &Identifier, IdentifierLoc,
+ LookupResult FoundOuter(*this, IdInfo.Identifier, IdInfo.IdentifierLoc,
LookupNestedNameSpecifierName);
LookupName(FoundOuter, S);
OuterDecl = FoundOuter.getAsSingle<NamedDecl>();
@@ -682,9 +676,9 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
if (ErrorRecoveryLookup)
return true;
- Diag(IdentifierLoc,
+ Diag(IdInfo.IdentifierLoc,
diag::err_nested_name_member_ref_lookup_ambiguous)
- << &Identifier;
+ << IdInfo.Identifier;
Diag(SD->getLocation(), diag::note_ambig_member_ref_object_type)
<< ObjectType;
Diag(OuterDecl->getLocation(), diag::note_ambig_member_ref_scope);
@@ -703,16 +697,15 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
return false;
// The use of a nested name specifier may trigger deprecation warnings.
- DiagnoseUseOfDecl(SD, CCLoc);
+ DiagnoseUseOfDecl(SD, IdInfo.CCLoc);
-
if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(SD)) {
- SS.Extend(Context, Namespace, IdentifierLoc, CCLoc);
+ SS.Extend(Context, Namespace, IdInfo.IdentifierLoc, IdInfo.CCLoc);
return false;
}
if (NamespaceAliasDecl *Alias = dyn_cast<NamespaceAliasDecl>(SD)) {
- SS.Extend(Context, Alias, IdentifierLoc, CCLoc);
+ SS.Extend(Context, Alias, IdInfo.IdentifierLoc, IdInfo.CCLoc);
return false;
}
@@ -722,41 +715,41 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
if (isa<InjectedClassNameType>(T)) {
InjectedClassNameTypeLoc InjectedTL
= TLB.push<InjectedClassNameTypeLoc>(T);
- InjectedTL.setNameLoc(IdentifierLoc);
+ InjectedTL.setNameLoc(IdInfo.IdentifierLoc);
} else if (isa<RecordType>(T)) {
RecordTypeLoc RecordTL = TLB.push<RecordTypeLoc>(T);
- RecordTL.setNameLoc(IdentifierLoc);
+ RecordTL.setNameLoc(IdInfo.IdentifierLoc);
} else if (isa<TypedefType>(T)) {
TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(T);
- TypedefTL.setNameLoc(IdentifierLoc);
+ TypedefTL.setNameLoc(IdInfo.IdentifierLoc);
} else if (isa<EnumType>(T)) {
EnumTypeLoc EnumTL = TLB.push<EnumTypeLoc>(T);
- EnumTL.setNameLoc(IdentifierLoc);
+ EnumTL.setNameLoc(IdInfo.IdentifierLoc);
} else if (isa<TemplateTypeParmType>(T)) {
TemplateTypeParmTypeLoc TemplateTypeTL
= TLB.push<TemplateTypeParmTypeLoc>(T);
- TemplateTypeTL.setNameLoc(IdentifierLoc);
+ TemplateTypeTL.setNameLoc(IdInfo.IdentifierLoc);
} else if (isa<UnresolvedUsingType>(T)) {
UnresolvedUsingTypeLoc UnresolvedTL
= TLB.push<UnresolvedUsingTypeLoc>(T);
- UnresolvedTL.setNameLoc(IdentifierLoc);
+ UnresolvedTL.setNameLoc(IdInfo.IdentifierLoc);
} else if (isa<SubstTemplateTypeParmType>(T)) {
SubstTemplateTypeParmTypeLoc TL
= TLB.push<SubstTemplateTypeParmTypeLoc>(T);
- TL.setNameLoc(IdentifierLoc);
+ TL.setNameLoc(IdInfo.IdentifierLoc);
} else if (isa<SubstTemplateTypeParmPackType>(T)) {
SubstTemplateTypeParmPackTypeLoc TL
= TLB.push<SubstTemplateTypeParmPackTypeLoc>(T);
- TL.setNameLoc(IdentifierLoc);
+ TL.setNameLoc(IdInfo.IdentifierLoc);
} else {
llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
}
if (T->isEnumeralType())
- Diag(IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+ Diag(IdInfo.IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
- CCLoc);
+ IdInfo.CCLoc);
return false;
}
@@ -795,9 +788,11 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
if (DC->isDependentContext() && DC->isFunctionOrMethod()) {
CXXRecordDecl *ContainingClass = dyn_cast<CXXRecordDecl>(DC->getParent());
if (ContainingClass && ContainingClass->hasAnyDependentBases()) {
- Diag(IdentifierLoc, diag::ext_undeclared_unqual_id_with_dependent_base)
- << &Identifier << ContainingClass;
- SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
+ Diag(IdInfo.IdentifierLoc,
+ diag::ext_undeclared_unqual_id_with_dependent_base)
+ << IdInfo.Identifier << ContainingClass;
+ SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc,
+ IdInfo.CCLoc);
return false;
}
}
@@ -805,28 +800,27 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
if (!Found.empty()) {
if (TypeDecl *TD = Found.getAsSingle<TypeDecl>())
- Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
+ Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
<< Context.getTypeDeclType(TD) << getLangOpts().CPlusPlus;
else {
- Diag(IdentifierLoc, diag::err_expected_class_or_namespace)
- << &Identifier << getLangOpts().CPlusPlus;
+ Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
+ << IdInfo.Identifier << getLangOpts().CPlusPlus;
if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
- Diag(ND->getLocation(), diag::note_entity_declared_at) << &Identifier;
+ Diag(ND->getLocation(), diag::note_entity_declared_at)
+ << IdInfo.Identifier;
}
} else if (SS.isSet())
- Diag(IdentifierLoc, diag::err_no_member) << &Identifier << LookupCtx
- << SS.getRange();
+ Diag(IdInfo.IdentifierLoc, diag::err_no_member) << IdInfo.Identifier
+ << LookupCtx << SS.getRange();
else
- Diag(IdentifierLoc, diag::err_undeclared_var_use) << &Identifier;
+ Diag(IdInfo.IdentifierLoc, diag::err_undeclared_var_use)
+ << IdInfo.Identifier;
return true;
}
bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
- IdentifierInfo &Identifier,
- SourceLocation IdentifierLoc,
- SourceLocation CCLoc,
- ParsedType ObjectType,
+ NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup,
@@ -834,9 +828,8 @@ bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
if (SS.isInvalid())
return true;
- return BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, CCLoc,
- GetTypeFromParser(ObjectType),
- EnteringContext, SS,
+ return BuildCXXNestedNameSpecifier(S, IdInfo,
+ EnteringContext, SS,
/*ScopeLookupResult=*/nullptr, false,
IsCorrectedToColon);
}
@@ -871,17 +864,12 @@ bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
///
/// The arguments are the same as those passed to ActOnCXXNestedNameSpecifier.
bool Sema::IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
- IdentifierInfo &Identifier,
- SourceLocation IdentifierLoc,
- SourceLocation ColonLoc,
- ParsedType ObjectType,
+ NestedNameSpecInfo &IdInfo,
bool EnteringContext) {
if (SS.isInvalid())
return false;
- return !BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, ColonLoc,
- GetTypeFromParser(ObjectType),
- EnteringContext, SS,
+ return !BuildCXXNestedNameSpecifier(S, IdInfo, EnteringContext, SS,
/*ScopeLookupResult=*/nullptr, true);
}
@@ -987,9 +975,9 @@ void *Sema::SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS) {
if (SS.isEmpty() || SS.isInvalid())
return nullptr;
- void *Mem = Context.Allocate((sizeof(NestedNameSpecifierAnnotation) +
- SS.location_size()),
- llvm::alignOf<NestedNameSpecifierAnnotation>());
+ void *Mem = Context.Allocate(
+ (sizeof(NestedNameSpecifierAnnotation) + SS.location_size()),
+ alignof(NestedNameSpecifierAnnotation));
NestedNameSpecifierAnnotation *Annotation
= new (Mem) NestedNameSpecifierAnnotation;
Annotation->NNS = SS.getScopeRep();
@@ -1013,6 +1001,11 @@ void Sema::RestoreNestedNameSpecifierAnnotation(void *AnnotationPtr,
bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+ // Don't enter a declarator context when the current context is an Objective-C
+ // declaration.
+ if (isa<ObjCContainerDecl>(CurContext) || isa<ObjCMethodDecl>(CurContext))
+ return false;
+
NestedNameSpecifier *Qualifier = SS.getScopeRep();
// There are only two places a well-formed program may qualify a
diff --git a/lib/Sema/SemaCast.cpp b/lib/Sema/SemaCast.cpp
index e83dd0716780..6222e4cec47a 100644
--- a/lib/Sema/SemaCast.cpp
+++ b/lib/Sema/SemaCast.cpp
@@ -256,6 +256,7 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
Op.CheckConstCast();
if (Op.SrcExpr.isInvalid())
return ExprError();
+ DiscardMisalignedMemberAddress(DestType.getTypePtr(), E);
}
return Op.complete(CXXConstCastExpr::Create(Context, Op.ResultType,
Op.ValueKind, Op.SrcExpr.get(), DestTInfo,
@@ -279,6 +280,7 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
Op.CheckReinterpretCast();
if (Op.SrcExpr.isInvalid())
return ExprError();
+ DiscardMisalignedMemberAddress(DestType.getTypePtr(), E);
}
return Op.complete(CXXReinterpretCastExpr::Create(Context, Op.ResultType,
Op.ValueKind, Op.Kind, Op.SrcExpr.get(),
@@ -291,6 +293,7 @@ Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
Op.CheckStaticCast();
if (Op.SrcExpr.isInvalid())
return ExprError();
+ DiscardMisalignedMemberAddress(DestType.getTypePtr(), E);
}
return Op.complete(CXXStaticCastExpr::Create(Context, Op.ResultType,
@@ -980,7 +983,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
// C++11 [expr.static.cast]p3:
// A glvalue of type "cv1 T1" can be cast to type "rvalue reference to cv2
// T2" if "cv2 T2" is reference-compatible with "cv1 T1".
- tcr = TryLValueToRValueCast(Self, SrcExpr.get(), DestType, CStyle, Kind,
+ tcr = TryLValueToRValueCast(Self, SrcExpr.get(), DestType, CStyle, Kind,
BasePath, msg);
if (tcr != TC_NotApplicable)
return tcr;
@@ -1131,12 +1134,12 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
}
/// Tests whether a conversion according to N2844 is valid.
-TryCastResult
-TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
- bool CStyle, CastKind &Kind, CXXCastPath &BasePath,
- unsigned &msg) {
+TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
+ QualType DestType, bool CStyle,
+ CastKind &Kind, CXXCastPath &BasePath,
+ unsigned &msg) {
// C++11 [expr.static.cast]p3:
- // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to
+ // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to
// cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
const RValueReferenceType *R = DestType->getAs<RValueReferenceType>();
if (!R)
@@ -1157,15 +1160,18 @@ TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
FromType = FromType.getUnqualifiedType();
ToType = ToType.getUnqualifiedType();
}
-
- if (Self.CompareReferenceRelationship(SrcExpr->getLocStart(),
- ToType, FromType,
- DerivedToBase, ObjCConversion,
- ObjCLifetimeConversion)
- < Sema::Ref_Compatible_With_Added_Qualification) {
- if (CStyle)
+
+ Sema::ReferenceCompareResult RefResult = Self.CompareReferenceRelationship(
+ SrcExpr->getLocStart(), ToType, FromType, DerivedToBase, ObjCConversion,
+ ObjCLifetimeConversion);
+ if (RefResult != Sema::Ref_Compatible) {
+ if (CStyle || RefResult == Sema::Ref_Incompatible)
return TC_NotApplicable;
- msg = diag::err_bad_lvalue_to_rvalue_cast;
+ // Diagnose types which are reference-related but not compatible here since
+ // we can provide better diagnostics. In these cases forwarding to
+ // [expr.static.cast]p4 should never result in a well-formed cast.
+ msg = SrcExpr->isLValue() ? diag::err_bad_lvalue_to_rvalue_cast
+ : diag::err_bad_rvalue_to_rvalue_cast;
return TC_Failed;
}
@@ -1511,6 +1517,9 @@ TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
? InitializationKind::CreateFunctionalCast(OpRange, ListInitialization)
: InitializationKind::CreateCast(OpRange);
Expr *SrcExprRaw = SrcExpr.get();
+ // FIXME: Per DR242, we should check for an implicit conversion sequence
+ // or for a constructor that could be invoked by direct-initialization
+ // here, not for an initialization sequence.
InitializationSequence InitSeq(Self, Entity, InitKind, SrcExprRaw);
// At this point of CheckStaticCast, if the destination is a reference,
@@ -1646,7 +1655,8 @@ static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
if (NeedToMaterializeTemporary)
// This is a const_cast from a class prvalue to an rvalue reference type.
// Materialize a temporary to store the result of the conversion.
- SrcExpr = Self.CreateMaterializeTemporaryExpr(SrcType, SrcExpr.get(),
+ SrcExpr = Self.CreateMaterializeTemporaryExpr(SrcExpr.get()->getType(),
+ SrcExpr.get(),
/*IsLValueReference*/ false);
return TC_Success;
@@ -1910,7 +1920,10 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
switch (SrcExpr.get()->getObjectKind()) {
case OK_Ordinary:
break;
- case OK_BitField: inappropriate = "bit-field"; break;
+ case OK_BitField:
+ msg = diag::err_bad_cxx_cast_bitfield;
+ return TC_NotApplicable;
+ // FIXME: Use a specific diagnostic for the rest of these cases.
case OK_VectorComponent: inappropriate = "vector element"; break;
case OK_ObjCProperty: inappropriate = "property expression"; break;
case OK_ObjCSubscript: inappropriate = "container subscripting expression";
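A small C++ illustration (not from the patch) of the bit-field case now given its own diagnostic:

    struct S { int b : 3; };

    void f(S s) {
      // A reinterpret_cast from a bit-field glvalue is now rejected with
      // err_bad_cxx_cast_bitfield rather than the generic message:
      //   (void)reinterpret_cast<int &>(s.b); // error
    }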
@@ -2435,7 +2448,7 @@ void CastOperation::CheckCStyleCast() {
return;
}
Self.Diag(OpRange.getBegin(),
- diag::error_opencl_cast_non_zero_to_event_t)
+ diag::err_opencl_cast_non_zero_to_event_t)
<< CastInt.toString(10) << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
@@ -2516,7 +2529,8 @@ void CastOperation::CheckCStyleCast() {
}
}
- if (Self.getLangOpts().OpenCL && !Self.getOpenCLOptions().cl_khr_fp16) {
+ if (Self.getLangOpts().OpenCL &&
+ !Self.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
if (DestType->isHalfType()) {
Self.Diag(SrcExpr.get()->getLocStart(), diag::err_opencl_cast_to_half)
<< DestType << SrcExpr.get()->getSourceRange();
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index 7f7dbe8873d4..9c902959233f 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -12,7 +12,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclCXX.h"
@@ -33,14 +32,14 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Locale.h"
-#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/raw_ostream.h"
-#include <limits>
using namespace clang;
using namespace sema;
@@ -316,8 +315,18 @@ static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
return checkOpenCLBlockArgs(S, BlockArg);
}
+/// Diagnose integer type and any valid implicit conversion to it.
+static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
+ const QualType &IntType);
+
static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
- unsigned Start, unsigned End);
+ unsigned Start, unsigned End) {
+ bool IllegalParams = false;
+ for (unsigned I = Start; I <= End; ++I)
+ IllegalParams |= checkOpenCLEnqueueIntType(S, TheCall->getArg(I),
+ S.Context.getSizeType());
+ return IllegalParams;
+}
/// OpenCL v2.0, s6.13.17.1 - Check that sizes are provided for all
/// 'local void*' parameter of passed block.
@@ -452,16 +461,20 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
Expr *Arg4 = TheCall->getArg(4);
Expr *Arg5 = TheCall->getArg(5);
- // Fith argument is always passed as pointers to clk_event_t.
- if (!Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
+ // Fifth argument is always passed as a pointer to clk_event_t.
+ if (!Arg4->isNullPointerConstant(S.Context,
+ Expr::NPC_ValueDependentIsNotNull) &&
+ !Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
S.Diag(TheCall->getArg(4)->getLocStart(),
diag::err_opencl_enqueue_kernel_expected_type)
<< S.Context.getPointerType(S.Context.OCLClkEventTy);
return true;
}
- // Sixth argument is always passed as pointers to clk_event_t.
- if (!(Arg5->getType()->isPointerType() &&
+ // Sixth argument is always passed as a pointer to clk_event_t.
+ if (!Arg5->isNullPointerConstant(S.Context,
+ Expr::NPC_ValueDependentIsNotNull) &&
+ !(Arg5->getType()->isPointerType() &&
Arg5->getType()->getPointeeType()->isClkEventT())) {
S.Diag(TheCall->getArg(5)->getLocStart(),
diag::err_opencl_enqueue_kernel_expected_type)
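An OpenCL C sketch (illustrative, per the OpenCL 2.0 enqueue_kernel builtin) of the call shape these relaxed checks now accept:

    kernel void caller(void) {
      ndrange_t nd = ndrange_1D(64);
      enqueue_kernel(get_default_queue(), CLK_ENQUEUE_FLAGS_WAIT_KERNEL, nd,
                     0, NULL, NULL, // null clk_event_t pointers now allowed
                     ^{ /* enqueued block */ });
    }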
@@ -792,6 +805,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (SemaBuiltinPrefetch(TheCall))
return ExprError();
break;
+ case Builtin::BI__builtin_alloca_with_align:
+ if (SemaBuiltinAllocaWithAlign(TheCall))
+ return ExprError();
+ break;
case Builtin::BI__assume:
case Builtin::BI__builtin_assume:
if (SemaBuiltinAssume(TheCall))
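For context, a C++ sketch (illustrative) of the builtin the new SemaBuiltinAllocaWithAlign check covers; the alignment argument is given in bits and must be a constant power of two:

    void f() {
      void *p = __builtin_alloca_with_align(64, 256); // 256 bits = 32 bytes
      (void)p;
    }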
@@ -1021,6 +1038,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
// check for the argument.
if (SemaBuiltinRWPipe(*this, TheCall))
return ExprError();
+ TheCall->setType(Context.IntTy);
break;
case Builtin::BIreserve_read_pipe:
case Builtin::BIreserve_write_pipe:
@@ -1048,6 +1066,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BIget_pipe_max_packets:
if (SemaBuiltinPipePackets(*this, TheCall))
return ExprError();
+ TheCall->setType(Context.UnsignedIntTy);
break;
case Builtin::BIto_global:
case Builtin::BIto_local:
@@ -1064,6 +1083,13 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BIget_kernel_preferred_work_group_size_multiple:
if (SemaOpenCLBuiltinKernelWorkGroupSize(*this, TheCall))
return ExprError();
+ break;
+ case Builtin::BI__builtin_os_log_format:
+ case Builtin::BI__builtin_os_log_format_buffer_size:
+ if (SemaBuiltinOSLogFormat(TheCall)) {
+ return ExprError();
+ }
+ break;
}
// Since the target specific builtins for each arch overlap, only check those
@@ -1757,51 +1783,237 @@ static bool SemaBuiltinCpuSupports(Sema &S, CallExpr *TheCall) {
return false;
}
-bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- int i = 0, l = 0, u = 0;
+// Check if the rounding mode is legal.
+bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
+ // Indicates if this instruction has rounding control or just SAE.
+ bool HasRC = false;
+
+ unsigned ArgNum = 0;
switch (BuiltinID) {
default:
return false;
- case X86::BI__builtin_cpu_supports:
+ case X86::BI__builtin_ia32_vcvttsd2si32:
+ case X86::BI__builtin_ia32_vcvttsd2si64:
+ case X86::BI__builtin_ia32_vcvttsd2usi32:
+ case X86::BI__builtin_ia32_vcvttsd2usi64:
+ case X86::BI__builtin_ia32_vcvttss2si32:
+ case X86::BI__builtin_ia32_vcvttss2si64:
+ case X86::BI__builtin_ia32_vcvttss2usi32:
+ case X86::BI__builtin_ia32_vcvttss2usi64:
+ ArgNum = 1;
+ break;
+ case X86::BI__builtin_ia32_cvtps2pd512_mask:
+ case X86::BI__builtin_ia32_cvttpd2dq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2qq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2udq512_mask:
+ case X86::BI__builtin_ia32_cvttpd2uqq512_mask:
+ case X86::BI__builtin_ia32_cvttps2dq512_mask:
+ case X86::BI__builtin_ia32_cvttps2qq512_mask:
+ case X86::BI__builtin_ia32_cvttps2udq512_mask:
+ case X86::BI__builtin_ia32_cvttps2uqq512_mask:
+ case X86::BI__builtin_ia32_exp2pd_mask:
+ case X86::BI__builtin_ia32_exp2ps_mask:
+ case X86::BI__builtin_ia32_getexppd512_mask:
+ case X86::BI__builtin_ia32_getexpps512_mask:
+ case X86::BI__builtin_ia32_rcp28pd_mask:
+ case X86::BI__builtin_ia32_rcp28ps_mask:
+ case X86::BI__builtin_ia32_rsqrt28pd_mask:
+ case X86::BI__builtin_ia32_rsqrt28ps_mask:
+ case X86::BI__builtin_ia32_vcomisd:
+ case X86::BI__builtin_ia32_vcomiss:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ ArgNum = 3;
+ break;
+ case X86::BI__builtin_ia32_cmppd512_mask:
+ case X86::BI__builtin_ia32_cmpps512_mask:
+ case X86::BI__builtin_ia32_cmpsd_mask:
+ case X86::BI__builtin_ia32_cmpss_mask:
+ case X86::BI__builtin_ia32_cvtss2sd_round_mask:
+ case X86::BI__builtin_ia32_getexpsd128_round_mask:
+ case X86::BI__builtin_ia32_getexpss128_round_mask:
+ case X86::BI__builtin_ia32_maxpd512_mask:
+ case X86::BI__builtin_ia32_maxps512_mask:
+ case X86::BI__builtin_ia32_maxsd_round_mask:
+ case X86::BI__builtin_ia32_maxss_round_mask:
+ case X86::BI__builtin_ia32_minpd512_mask:
+ case X86::BI__builtin_ia32_minps512_mask:
+ case X86::BI__builtin_ia32_minsd_round_mask:
+ case X86::BI__builtin_ia32_minss_round_mask:
+ case X86::BI__builtin_ia32_rcp28sd_round_mask:
+ case X86::BI__builtin_ia32_rcp28ss_round_mask:
+ case X86::BI__builtin_ia32_reducepd512_mask:
+ case X86::BI__builtin_ia32_reduceps512_mask:
+ case X86::BI__builtin_ia32_rndscalepd_mask:
+ case X86::BI__builtin_ia32_rndscaleps_mask:
+ case X86::BI__builtin_ia32_rsqrt28sd_round_mask:
+ case X86::BI__builtin_ia32_rsqrt28ss_round_mask:
+ ArgNum = 4;
+ break;
+ case X86::BI__builtin_ia32_fixupimmpd512_mask:
+ case X86::BI__builtin_ia32_fixupimmpd512_maskz:
+ case X86::BI__builtin_ia32_fixupimmps512_mask:
+ case X86::BI__builtin_ia32_fixupimmps512_maskz:
+ case X86::BI__builtin_ia32_fixupimmsd_mask:
+ case X86::BI__builtin_ia32_fixupimmsd_maskz:
+ case X86::BI__builtin_ia32_fixupimmss_mask:
+ case X86::BI__builtin_ia32_fixupimmss_maskz:
+ case X86::BI__builtin_ia32_rangepd512_mask:
+ case X86::BI__builtin_ia32_rangeps512_mask:
+ case X86::BI__builtin_ia32_rangesd128_round_mask:
+ case X86::BI__builtin_ia32_rangess128_round_mask:
+ case X86::BI__builtin_ia32_reducesd_mask:
+ case X86::BI__builtin_ia32_reducess_mask:
+ case X86::BI__builtin_ia32_rndscalesd_round_mask:
+ case X86::BI__builtin_ia32_rndscaless_round_mask:
+ ArgNum = 5;
+ break;
+ case X86::BI__builtin_ia32_vcvtsd2si64:
+ case X86::BI__builtin_ia32_vcvtsd2si32:
+ case X86::BI__builtin_ia32_vcvtsd2usi32:
+ case X86::BI__builtin_ia32_vcvtsd2usi64:
+ case X86::BI__builtin_ia32_vcvtss2si32:
+ case X86::BI__builtin_ia32_vcvtss2si64:
+ case X86::BI__builtin_ia32_vcvtss2usi32:
+ case X86::BI__builtin_ia32_vcvtss2usi64:
+ ArgNum = 1;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_cvtsi2sd64:
+ case X86::BI__builtin_ia32_cvtsi2ss32:
+ case X86::BI__builtin_ia32_cvtsi2ss64:
+ case X86::BI__builtin_ia32_cvtusi2sd64:
+ case X86::BI__builtin_ia32_cvtusi2ss32:
+ case X86::BI__builtin_ia32_cvtusi2ss64:
+ ArgNum = 2;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_cvtdq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtudq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtpd2ps512_mask:
+ case X86::BI__builtin_ia32_cvtpd2qq512_mask:
+ case X86::BI__builtin_ia32_cvtpd2uqq512_mask:
+ case X86::BI__builtin_ia32_cvtps2qq512_mask:
+ case X86::BI__builtin_ia32_cvtps2uqq512_mask:
+ case X86::BI__builtin_ia32_cvtqq2pd512_mask:
+ case X86::BI__builtin_ia32_cvtqq2ps512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
+ case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
+ case X86::BI__builtin_ia32_sqrtpd512_mask:
+ case X86::BI__builtin_ia32_sqrtps512_mask:
+ ArgNum = 3;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_addpd512_mask:
+ case X86::BI__builtin_ia32_addps512_mask:
+ case X86::BI__builtin_ia32_divpd512_mask:
+ case X86::BI__builtin_ia32_divps512_mask:
+ case X86::BI__builtin_ia32_mulpd512_mask:
+ case X86::BI__builtin_ia32_mulps512_mask:
+ case X86::BI__builtin_ia32_subpd512_mask:
+ case X86::BI__builtin_ia32_subps512_mask:
+ case X86::BI__builtin_ia32_addss_round_mask:
+ case X86::BI__builtin_ia32_addsd_round_mask:
+ case X86::BI__builtin_ia32_divss_round_mask:
+ case X86::BI__builtin_ia32_divsd_round_mask:
+ case X86::BI__builtin_ia32_mulss_round_mask:
+ case X86::BI__builtin_ia32_mulsd_round_mask:
+ case X86::BI__builtin_ia32_subss_round_mask:
+ case X86::BI__builtin_ia32_subsd_round_mask:
+ case X86::BI__builtin_ia32_scalefpd512_mask:
+ case X86::BI__builtin_ia32_scalefps512_mask:
+ case X86::BI__builtin_ia32_scalefsd_round_mask:
+ case X86::BI__builtin_ia32_scalefss_round_mask:
+ case X86::BI__builtin_ia32_getmantpd512_mask:
+ case X86::BI__builtin_ia32_getmantps512_mask:
+ case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
+ case X86::BI__builtin_ia32_sqrtsd_round_mask:
+ case X86::BI__builtin_ia32_sqrtss_round_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmaddpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddps512_mask:
+ case X86::BI__builtin_ia32_vfmaddps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddps512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask:
+ case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
+ case X86::BI__builtin_ia32_vfmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
+ case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
+ case X86::BI__builtin_ia32_vfnmaddpd512_mask:
+ case X86::BI__builtin_ia32_vfnmaddps512_mask:
+ case X86::BI__builtin_ia32_vfnmsubpd512_mask:
+ case X86::BI__builtin_ia32_vfnmsubpd512_mask3:
+ case X86::BI__builtin_ia32_vfnmsubps512_mask:
+ case X86::BI__builtin_ia32_vfnmsubps512_mask3:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask:
+ case X86::BI__builtin_ia32_vfmaddsd3_maskz:
+ case X86::BI__builtin_ia32_vfmaddsd3_mask3:
+ case X86::BI__builtin_ia32_vfmaddss3_mask:
+ case X86::BI__builtin_ia32_vfmaddss3_maskz:
+ case X86::BI__builtin_ia32_vfmaddss3_mask3:
+ ArgNum = 4;
+ HasRC = true;
+ break;
+ case X86::BI__builtin_ia32_getmantsd_round_mask:
+ case X86::BI__builtin_ia32_getmantss_round_mask:
+ ArgNum = 5;
+ HasRC = true;
+ break;
+ }
+
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+  // Make sure the rounding mode is either ROUND_CUR_DIRECTION or has the
+  // ROUND_NO_EXC bit set. If the intrinsic has rounding control (bits 1:0),
+  // make sure it is only combined with ROUND_NO_EXC.
+ if (Result == 4/*ROUND_CUR_DIRECTION*/ ||
+ Result == 8/*ROUND_NO_EXC*/ ||
+ (HasRC && Result.getZExtValue() >= 8 && Result.getZExtValue() <= 11))
+ return false;
+
+ return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_rounding)
+ << Arg->getSourceRange();
+}
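As a hedged illustration of the new immediate check, consider __builtin_ia32_addpd512_mask, whose rounding immediate is argument 4; the __m512d and __mmask8 types are assumed to be as in <immintrin.h>, and the constants follow the _MM_FROUND_* encoding:

    __m512d cur(__m512d A, __m512d B, __m512d W, __mmask8 M) {
      return __builtin_ia32_addpd512_mask(A, B, W, M, 4); // _MM_FROUND_CUR_DIRECTION
    }
    __m512d rne(__m512d A, __m512d B, __m512d W, __mmask8 M) {
      return __builtin_ia32_addpd512_mask(A, B, W, M, 8); // to-nearest | NO_EXC
    }
    __m512d bad(__m512d A, __m512d B, __m512d W, __mmask8 M) {
      return __builtin_ia32_addpd512_mask(A, B, W, M, 1); // error: rounding mode
    }                                                     // without ROUND_NO_EXC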
+
+bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ if (BuiltinID == X86::BI__builtin_cpu_supports)
return SemaBuiltinCpuSupports(*this, TheCall);
- case X86::BI__builtin_ms_va_start:
+
+ if (BuiltinID == X86::BI__builtin_ms_va_start)
return SemaBuiltinMSVAStart(TheCall);
- case X86::BI__builtin_ia32_extractf64x4_mask:
- case X86::BI__builtin_ia32_extracti64x4_mask:
- case X86::BI__builtin_ia32_extractf32x8_mask:
- case X86::BI__builtin_ia32_extracti32x8_mask:
- case X86::BI__builtin_ia32_extractf64x2_256_mask:
- case X86::BI__builtin_ia32_extracti64x2_256_mask:
- case X86::BI__builtin_ia32_extractf32x4_256_mask:
- case X86::BI__builtin_ia32_extracti32x4_256_mask:
- i = 1; l = 0; u = 1;
- break;
+
+  // If the intrinsic has rounding or SAE, make sure it is valid.
+ if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
+ return true;
+
+ // For intrinsics which take an immediate value as part of the instruction,
+ // range check them here.
+ int i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
case X86::BI_mm_prefetch:
- case X86::BI__builtin_ia32_extractf32x4_mask:
- case X86::BI__builtin_ia32_extracti32x4_mask:
- case X86::BI__builtin_ia32_extractf64x2_512_mask:
- case X86::BI__builtin_ia32_extracti64x2_512_mask:
i = 1; l = 0; u = 3;
break;
- case X86::BI__builtin_ia32_insertf32x8_mask:
- case X86::BI__builtin_ia32_inserti32x8_mask:
- case X86::BI__builtin_ia32_insertf64x4_mask:
- case X86::BI__builtin_ia32_inserti64x4_mask:
- case X86::BI__builtin_ia32_insertf64x2_256_mask:
- case X86::BI__builtin_ia32_inserti64x2_256_mask:
- case X86::BI__builtin_ia32_insertf32x4_256_mask:
- case X86::BI__builtin_ia32_inserti32x4_256_mask:
- i = 2; l = 0; u = 1;
- break;
case X86::BI__builtin_ia32_sha1rnds4:
case X86::BI__builtin_ia32_shuf_f32x4_256_mask:
case X86::BI__builtin_ia32_shuf_f64x2_256_mask:
case X86::BI__builtin_ia32_shuf_i32x4_256_mask:
case X86::BI__builtin_ia32_shuf_i64x2_256_mask:
- case X86::BI__builtin_ia32_insertf64x2_512_mask:
- case X86::BI__builtin_ia32_inserti64x2_512_mask:
- case X86::BI__builtin_ia32_insertf32x4_mask:
- case X86::BI__builtin_ia32_inserti32x4_mask:
i = 2; l = 0; u = 3;
break;
case X86::BI__builtin_ia32_vpermil2pd:
@@ -1909,33 +2121,6 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_prord256_mask:
case X86::BI__builtin_ia32_prorq128_mask:
case X86::BI__builtin_ia32_prorq256_mask:
- case X86::BI__builtin_ia32_psllwi512_mask:
- case X86::BI__builtin_ia32_psllwi128_mask:
- case X86::BI__builtin_ia32_psllwi256_mask:
- case X86::BI__builtin_ia32_psrldi128_mask:
- case X86::BI__builtin_ia32_psrldi256_mask:
- case X86::BI__builtin_ia32_psrldi512_mask:
- case X86::BI__builtin_ia32_psrlqi128_mask:
- case X86::BI__builtin_ia32_psrlqi256_mask:
- case X86::BI__builtin_ia32_psrlqi512_mask:
- case X86::BI__builtin_ia32_psrawi512_mask:
- case X86::BI__builtin_ia32_psrawi128_mask:
- case X86::BI__builtin_ia32_psrawi256_mask:
- case X86::BI__builtin_ia32_psrlwi512_mask:
- case X86::BI__builtin_ia32_psrlwi128_mask:
- case X86::BI__builtin_ia32_psrlwi256_mask:
- case X86::BI__builtin_ia32_psradi128_mask:
- case X86::BI__builtin_ia32_psradi256_mask:
- case X86::BI__builtin_ia32_psradi512_mask:
- case X86::BI__builtin_ia32_psraqi128_mask:
- case X86::BI__builtin_ia32_psraqi256_mask:
- case X86::BI__builtin_ia32_psraqi512_mask:
- case X86::BI__builtin_ia32_pslldi128_mask:
- case X86::BI__builtin_ia32_pslldi256_mask:
- case X86::BI__builtin_ia32_pslldi512_mask:
- case X86::BI__builtin_ia32_psllqi128_mask:
- case X86::BI__builtin_ia32_psllqi256_mask:
- case X86::BI__builtin_ia32_psllqi512_mask:
case X86::BI__builtin_ia32_fpclasspd128_mask:
case X86::BI__builtin_ia32_fpclasspd256_mask:
case X86::BI__builtin_ia32_fpclassps128_mask:
@@ -1969,15 +2154,7 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
break;
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr128_mask:
- case X86::BI__builtin_ia32_palignr256_mask:
case X86::BI__builtin_ia32_palignr512_mask:
- case X86::BI__builtin_ia32_alignq512_mask:
- case X86::BI__builtin_ia32_alignd512_mask:
- case X86::BI__builtin_ia32_alignd128_mask:
- case X86::BI__builtin_ia32_alignd256_mask:
- case X86::BI__builtin_ia32_alignq128_mask:
- case X86::BI__builtin_ia32_alignq256_mask:
case X86::BI__builtin_ia32_vcomisd:
case X86::BI__builtin_ia32_vcomiss:
case X86::BI__builtin_ia32_shuf_f32x4_mask:
@@ -2271,7 +2448,9 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
// Refuse POD arguments that weren't caught by the format string
// checks above.
- if (CallType != VariadicDoesNotApply) {
+ auto *FD = dyn_cast_or_null<FunctionDecl>(FDecl);
+ if (CallType != VariadicDoesNotApply &&
+ (!FD || FD->getBuiltinID() != Builtin::BI__noop)) {
unsigned NumParams = Proto ? Proto->getNumParams()
: FDecl && isa<FunctionDecl>(FDecl)
? cast<FunctionDecl>(FDecl)->getNumParams()
@@ -2340,7 +2519,9 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
if (!FnInfo)
return false;
- CheckAbsoluteValueFunction(TheCall, FDecl, FnInfo);
+ CheckAbsoluteValueFunction(TheCall, FDecl);
+ CheckMaxUnsignedZero(TheCall, FDecl);
+
if (getLangOpts().ObjC1)
DiagnoseCStringFormatDirectiveInCFAPI(*this, FDecl, Args, NumArgs);
@@ -2691,6 +2872,9 @@ ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
Ty = Context.getPointerDiffType();
else {
Expr *ValArg = TheCall->getArg(i);
+ // Treat this argument as _Nonnull as we want to show a warning if
+ // NULL is passed into it.
+ CheckNonNullArgument(*this, ValArg, DRE->getLocStart());
unsigned AS = 0;
// Keep address space of non-atomic pointer type.
if (const PointerType *PtrTy =
@@ -3267,21 +3451,46 @@ bool Sema::CheckObjCString(Expr *Arg) {
if (Literal->containsNonAsciiOrNull()) {
StringRef String = Literal->getString();
unsigned NumBytes = String.size();
- SmallVector<UTF16, 128> ToBuf(NumBytes);
- const UTF8 *FromPtr = (const UTF8 *)String.data();
- UTF16 *ToPtr = &ToBuf[0];
-
- ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
- &ToPtr, ToPtr + NumBytes,
- strictConversion);
+ SmallVector<llvm::UTF16, 128> ToBuf(NumBytes);
+ const llvm::UTF8 *FromPtr = (const llvm::UTF8 *)String.data();
+ llvm::UTF16 *ToPtr = &ToBuf[0];
+
+ llvm::ConversionResult Result =
+ llvm::ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes, &ToPtr,
+ ToPtr + NumBytes, llvm::strictConversion);
// Check for conversion failure.
- if (Result != conversionOK)
+ if (Result != llvm::conversionOK)
Diag(Arg->getLocStart(),
diag::warn_cfstring_truncated) << Arg->getSourceRange();
}
return false;
}
+/// CheckOSLogFormatStringArg - Checks that the format string argument to the
+/// os_log() and os_trace() functions is correct, and converts it to const
+/// char *.
+ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
+ Arg = Arg->IgnoreParenCasts();
+ auto *Literal = dyn_cast<StringLiteral>(Arg);
+ if (!Literal) {
+ if (auto *ObjcLiteral = dyn_cast<ObjCStringLiteral>(Arg)) {
+ Literal = ObjcLiteral->getString();
+ }
+ }
+
+ if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
+ return ExprError(
+ Diag(Arg->getLocStart(), diag::err_os_log_format_not_string_constant)
+ << Arg->getSourceRange());
+ }
+
+ ExprResult Result(Literal);
+ QualType ResultTy = Context.getPointerType(Context.CharTy.withConst());
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(Context, ResultTy, false);
+ Result = PerformCopyInitialization(Entity, SourceLocation(), Result);
+ return Result;
+}
+
/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity. Emit an error and return true on failure; return false
/// on success.
@@ -3357,8 +3566,17 @@ bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
Diag(TheCall->getArg(1)->getLocStart(),
diag::warn_second_arg_of_va_start_not_last_named_param);
else if (IsCRegister || Type->isReferenceType() ||
- Type->isPromotableIntegerType() ||
- Type->isSpecificBuiltinType(BuiltinType::Float)) {
+ Type->isSpecificBuiltinType(BuiltinType::Float) || [=] {
+          // Promotable integers are UB, but enumerations need a bit of
+          // extra checking to see what their promoted type actually is.
+ if (!Type->isPromotableIntegerType())
+ return false;
+ if (!Type->isEnumeralType())
+ return true;
+ const EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
+ return !(ED &&
+ Context.typesAreCompatible(ED->getPromotionType(), Type));
+ }()) {
unsigned Reason = 0;
if (Type->isReferenceType()) Reason = 1;
else if (IsCRegister) Reason = 2;
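The lambda above narrows the old blanket warning: a promotable integer type is still flagged, but an enumeration is exempted when its promoted type is compatible with the enum type itself. A hedged C sketch (whether the promotion is compatible is implementation-defined):

    #include <stdarg.h>
    enum E { A = 1, B = 2 };
    void f(enum E last, ...) {
      va_list ap;
      va_start(ap, last); // no longer warned when E's promoted type is
      va_end(ap);         // compatible with E itself
    }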
@@ -3532,14 +3750,18 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
diag::err_typecheck_call_invalid_unary_fp)
<< OrigArg->getType() << OrigArg->getSourceRange();
- // If this is an implicit conversion from float -> double, remove it.
+ // If this is an implicit conversion from float -> float or double, remove it.
if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
- Expr *CastArg = Cast->getSubExpr();
- if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
- assert(Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) &&
- "promotion from float to double is the only expected cast here");
- Cast->setSubExpr(nullptr);
- TheCall->setArg(NumArgs-1, CastArg);
+    // Only remove standard FloatCasts, leaving other casts in place.
+ if (Cast->getCastKind() == CK_FloatingCast) {
+ Expr *CastArg = Cast->getSubExpr();
+ if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
+ assert((Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) ||
+ Cast->getType()->isSpecificBuiltinType(BuiltinType::Float)) &&
+ "promotion from float to either float or double is the only expected cast here");
+ Cast->setSubExpr(nullptr);
+ TheCall->setArg(NumArgs-1, CastArg);
+ }
}
}
@@ -3696,6 +3918,42 @@ bool Sema::SemaBuiltinAssume(CallExpr *TheCall) {
return false;
}
+/// Handle __builtin_alloca_with_align. This is declared
+/// as (size_t, size_t) where the second size_t must be a constant power of 2
+/// no smaller than the char width in bits and no larger than INT32_MAX.
+bool Sema::SemaBuiltinAllocaWithAlign(CallExpr *TheCall) {
+ // The alignment must be a constant integer.
+ Expr *Arg = TheCall->getArg(1);
+
+ // We can't check the value of a dependent argument.
+ if (!Arg->isTypeDependent() && !Arg->isValueDependent()) {
+ if (const auto *UE =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(Arg->IgnoreParenImpCasts()))
+ if (UE->getKind() == UETT_AlignOf)
+ Diag(TheCall->getLocStart(), diag::warn_alloca_align_alignof)
+ << Arg->getSourceRange();
+
+ llvm::APSInt Result = Arg->EvaluateKnownConstInt(Context);
+
+ if (!Result.isPowerOf2())
+ return Diag(TheCall->getLocStart(),
+ diag::err_alignment_not_power_of_two)
+ << Arg->getSourceRange();
+
+ if (Result < Context.getCharWidth())
+ return Diag(TheCall->getLocStart(), diag::err_alignment_too_small)
+ << (unsigned)Context.getCharWidth()
+ << Arg->getSourceRange();
+
+ if (Result > INT32_MAX)
+ return Diag(TheCall->getLocStart(), diag::err_alignment_too_big)
+ << INT32_MAX
+ << Arg->getSourceRange();
+ }
+
+ return false;
+}
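A hedged usage sketch; note the second argument is an alignment in bits, not bytes:

    void use(unsigned n) {
      void *a = __builtin_alloca_with_align(n, 64); // OK: 64 bits = 8-byte alignment
      void *b = __builtin_alloca_with_align(n, 65); // error: not a power of two
      void *c = __builtin_alloca_with_align(n, _Alignof(double)); // warns: _Alignof
    }                                               // yields bytes, builtin wants bits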
+
/// Handle __builtin_assume_aligned. This is declared
/// as (const void*, size_t, ...) and can take one optional constant int arg.
bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
@@ -3734,6 +3992,86 @@ bool Sema::SemaBuiltinAssumeAligned(CallExpr *TheCall) {
return false;
}
+bool Sema::SemaBuiltinOSLogFormat(CallExpr *TheCall) {
+ unsigned BuiltinID =
+ cast<FunctionDecl>(TheCall->getCalleeDecl())->getBuiltinID();
+ bool IsSizeCall = BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size;
+
+ unsigned NumArgs = TheCall->getNumArgs();
+ unsigned NumRequiredArgs = IsSizeCall ? 1 : 2;
+ if (NumArgs < NumRequiredArgs) {
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /* function call */ << NumRequiredArgs << NumArgs
+ << TheCall->getSourceRange();
+ }
+ if (NumArgs >= NumRequiredArgs + 0x100) {
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /* function call */ << (NumRequiredArgs + 0xff) << NumArgs
+ << TheCall->getSourceRange();
+ }
+ unsigned i = 0;
+
+ // For formatting call, check buffer arg.
+ if (!IsSizeCall) {
+ ExprResult Arg(TheCall->getArg(i));
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(
+ Context, Context.VoidPtrTy, false);
+ Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return true;
+ TheCall->setArg(i, Arg.get());
+ i++;
+ }
+
+ // Check string literal arg.
+ unsigned FormatIdx = i;
+ {
+ ExprResult Arg = CheckOSLogFormatStringArg(TheCall->getArg(i));
+ if (Arg.isInvalid())
+ return true;
+ TheCall->setArg(i, Arg.get());
+ i++;
+ }
+
+ // Make sure variadic args are scalar.
+ unsigned FirstDataArg = i;
+ while (i < NumArgs) {
+ ExprResult Arg = DefaultVariadicArgumentPromotion(
+ TheCall->getArg(i), VariadicFunction, nullptr);
+ if (Arg.isInvalid())
+ return true;
+ CharUnits ArgSize = Context.getTypeSizeInChars(Arg.get()->getType());
+ if (ArgSize.getQuantity() >= 0x100) {
+ return Diag(Arg.get()->getLocEnd(), diag::err_os_log_argument_too_big)
+ << i << (int)ArgSize.getQuantity() << 0xff
+ << TheCall->getSourceRange();
+ }
+ TheCall->setArg(i, Arg.get());
+ i++;
+ }
+
+ // Check formatting specifiers. NOTE: We're only doing this for the non-size
+ // call to avoid duplicate diagnostics.
+ if (!IsSizeCall) {
+ llvm::SmallBitVector CheckedVarArgs(NumArgs, false);
+ ArrayRef<const Expr *> Args(TheCall->getArgs(), TheCall->getNumArgs());
+ bool Success = CheckFormatArguments(
+ Args, /*HasVAListArg*/ false, FormatIdx, FirstDataArg, FST_OSLog,
+ VariadicFunction, TheCall->getLocStart(), SourceRange(),
+ CheckedVarArgs);
+ if (!Success)
+ return true;
+ }
+
+ if (IsSizeCall) {
+ TheCall->setType(Context.getSizeType());
+ } else {
+ TheCall->setType(Context.VoidPtrTy);
+ }
+ return false;
+}
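A hedged sketch of how the two builtins pair up after these checks: the size variant yields the size_t buffer size, and the formatting variant fills the buffer and returns it as void *:

    #include <stddef.h>
    void *log_pair(char *buf, int x, const char *s) {
      size_t needed = __builtin_os_log_format_buffer_size("%d: %s", x, s);
      (void)needed; // a caller would size 'buf' from this
      return __builtin_os_log_format(buf, "%d: %s", x, s);
    }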
+
/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
/// TheCall is a constant expression.
bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
@@ -3861,7 +4199,7 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
SmallVector<int, 5> Ranges;
if (FiveFields)
- Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 7, 15, 15});
+ Ranges.append({IsAArch64Builtin ? 1 : 15, 7, 15, 15, 7});
else
Ranges.append({15, 7, 15});
@@ -3980,7 +4318,95 @@ enum StringLiteralCheckType {
};
} // end anonymous namespace
-static void CheckFormatString(Sema &S, const StringLiteral *FExpr,
+static void sumOffsets(llvm::APSInt &Offset, llvm::APSInt Addend,
+ BinaryOperatorKind BinOpKind,
+ bool AddendIsRight) {
+ unsigned BitWidth = Offset.getBitWidth();
+ unsigned AddendBitWidth = Addend.getBitWidth();
+ // There might be negative interim results.
+ if (Addend.isUnsigned()) {
+ Addend = Addend.zext(++AddendBitWidth);
+ Addend.setIsSigned(true);
+ }
+ // Adjust the bit width of the APSInts.
+ if (AddendBitWidth > BitWidth) {
+ Offset = Offset.sext(AddendBitWidth);
+ BitWidth = AddendBitWidth;
+ } else if (BitWidth > AddendBitWidth) {
+ Addend = Addend.sext(BitWidth);
+ }
+
+ bool Ov = false;
+ llvm::APSInt ResOffset = Offset;
+ if (BinOpKind == BO_Add)
+ ResOffset = Offset.sadd_ov(Addend, Ov);
+ else {
+ assert(AddendIsRight && BinOpKind == BO_Sub &&
+ "operator must be add or sub with addend on the right");
+ ResOffset = Offset.ssub_ov(Addend, Ov);
+ }
+
+ // We add an offset to a pointer here so we should support an offset as big as
+ // possible.
+ if (Ov) {
+ assert(BitWidth <= UINT_MAX / 2 && "index (intermediate) result too big");
+ Offset = Offset.sext(2 * BitWidth);
+ sumOffsets(Offset, Addend, BinOpKind, AddendIsRight);
+ return;
+ }
+
+ Offset = ResOffset;
+}
+
+namespace {
+// This is a wrapper class around StringLiteral to support string literals
+// with an offset as format strings. It takes the offset into account when
+// returning the string and its length, or the source locations to display
+// notes correctly.
+class FormatStringLiteral {
+ const StringLiteral *FExpr;
+ int64_t Offset;
+
+ public:
+ FormatStringLiteral(const StringLiteral *fexpr, int64_t Offset = 0)
+ : FExpr(fexpr), Offset(Offset) {}
+
+ StringRef getString() const {
+ return FExpr->getString().drop_front(Offset);
+ }
+
+ unsigned getByteLength() const {
+ return FExpr->getByteLength() - getCharByteWidth() * Offset;
+ }
+ unsigned getLength() const { return FExpr->getLength() - Offset; }
+ unsigned getCharByteWidth() const { return FExpr->getCharByteWidth(); }
+
+ StringLiteral::StringKind getKind() const { return FExpr->getKind(); }
+
+ QualType getType() const { return FExpr->getType(); }
+
+ bool isAscii() const { return FExpr->isAscii(); }
+ bool isWide() const { return FExpr->isWide(); }
+ bool isUTF8() const { return FExpr->isUTF8(); }
+ bool isUTF16() const { return FExpr->isUTF16(); }
+ bool isUTF32() const { return FExpr->isUTF32(); }
+ bool isPascal() const { return FExpr->isPascal(); }
+
+ SourceLocation getLocationOfByte(
+ unsigned ByteNo, const SourceManager &SM, const LangOptions &Features,
+ const TargetInfo &Target, unsigned *StartToken = nullptr,
+ unsigned *StartTokenByteOffset = nullptr) const {
+ return FExpr->getLocationOfByte(ByteNo + Offset, SM, Features, Target,
+ StartToken, StartTokenByteOffset);
+ }
+
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return FExpr->getLocStart().getLocWithOffset(Offset);
+ }
+ SourceLocation getLocEnd() const LLVM_READONLY { return FExpr->getLocEnd(); }
+};
+} // end anonymous namespace
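Together with sumOffsets, this wrapper lets format-string checking follow constant offsets into a literal; a hedged sketch of calls that are now validated:

    #include <stdio.h>
    void demo(void) {
      printf("%s %d" + 3, 42); // checked as "%d" via the BinaryOperator case
      const char fmt[] = "%s %d";
      printf(&fmt[3], 42);     // &arr[idx] folded in via the UnaryOperator case
    }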
+
+static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
@@ -4001,8 +4427,11 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
unsigned firstDataArg, Sema::FormatStringType Type,
Sema::VariadicCallType CallType, bool InFunctionCall,
llvm::SmallBitVector &CheckedVarArgs,
- UncoveredArgHandler &UncoveredArg) {
+ UncoveredArgHandler &UncoveredArg,
+ llvm::APSInt Offset) {
tryAgain:
+ assert(Offset.isSigned() && "invalid offset");
+
if (E->isTypeDependent() || E->isValueDependent())
return SLCT_NotALiteral;
@@ -4036,6 +4465,10 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
CheckLeft = false;
}
+  // We need to maintain the offsets of the right- and the left-hand sides
+  // separately, to check that every possible indexed expression is a valid
+  // string literal. Each branch may end up resolving to a different string
+  // literal with a different offset.
StringLiteralCheckType Left;
if (!CheckLeft)
Left = SLCT_UncheckedLiteral;
@@ -4043,16 +4476,17 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
Left = checkFormatStringExpr(S, C->getTrueExpr(), Args,
HasVAListArg, format_idx, firstDataArg,
Type, CallType, InFunctionCall,
- CheckedVarArgs, UncoveredArg);
- if (Left == SLCT_NotALiteral || !CheckRight)
+ CheckedVarArgs, UncoveredArg, Offset);
+ if (Left == SLCT_NotALiteral || !CheckRight) {
return Left;
+ }
}
StringLiteralCheckType Right =
checkFormatStringExpr(S, C->getFalseExpr(), Args,
HasVAListArg, format_idx, firstDataArg,
Type, CallType, InFunctionCall, CheckedVarArgs,
- UncoveredArg);
+ UncoveredArg, Offset);
return (CheckLeft && Left < Right) ? Left : Right;
}
@@ -4105,8 +4539,8 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
return checkFormatStringExpr(S, Init, Args,
HasVAListArg, format_idx,
firstDataArg, Type, CallType,
- /*InFunctionCall*/false, CheckedVarArgs,
- UncoveredArg);
+ /*InFunctionCall*/ false, CheckedVarArgs,
+ UncoveredArg, Offset);
}
}
@@ -4161,7 +4595,7 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
return checkFormatStringExpr(S, Arg, Args,
HasVAListArg, format_idx, firstDataArg,
Type, CallType, InFunctionCall,
- CheckedVarArgs, UncoveredArg);
+ CheckedVarArgs, UncoveredArg, Offset);
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
unsigned BuiltinID = FD->getBuiltinID();
if (BuiltinID == Builtin::BI__builtin___CFStringMakeConstantString ||
@@ -4171,13 +4605,27 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
HasVAListArg, format_idx,
firstDataArg, Type, CallType,
InFunctionCall, CheckedVarArgs,
- UncoveredArg);
+ UncoveredArg, Offset);
}
}
}
return SLCT_NotALiteral;
}
+ case Stmt::ObjCMessageExprClass: {
+ const auto *ME = cast<ObjCMessageExpr>(E);
+ if (const auto *ND = ME->getMethodDecl()) {
+ if (const auto *FA = ND->getAttr<FormatArgAttr>()) {
+ unsigned ArgIndex = FA->getFormatIdx();
+ const Expr *Arg = ME->getArg(ArgIndex - 1);
+ return checkFormatStringExpr(
+ S, Arg, Args, HasVAListArg, format_idx, firstDataArg, Type,
+ CallType, InFunctionCall, CheckedVarArgs, UncoveredArg, Offset);
+ }
+ }
+
+ return SLCT_NotALiteral;
+ }
case Stmt::ObjCStringLiteralClass:
case Stmt::StringLiteralClass: {
const StringLiteral *StrE = nullptr;
@@ -4188,7 +4636,13 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
StrE = cast<StringLiteral>(E);
if (StrE) {
- CheckFormatString(S, StrE, E, Args, HasVAListArg, format_idx,
+ if (Offset.isNegative() || Offset > StrE->getLength()) {
+ // TODO: It would be better to have an explicit warning for out of
+ // bounds literals.
+ return SLCT_NotALiteral;
+ }
+ FormatStringLiteral FStr(StrE, Offset.sextOrTrunc(64).getSExtValue());
+ CheckFormatString(S, &FStr, E, Args, HasVAListArg, format_idx,
firstDataArg, Type, InFunctionCall, CallType,
CheckedVarArgs, UncoveredArg);
return SLCT_CheckedLiteral;
@@ -4196,6 +4650,50 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
return SLCT_NotALiteral;
}
+ case Stmt::BinaryOperatorClass: {
+ llvm::APSInt LResult;
+ llvm::APSInt RResult;
+
+ const BinaryOperator *BinOp = cast<BinaryOperator>(E);
+
+ // A string literal + an int offset is still a string literal.
+ if (BinOp->isAdditiveOp()) {
+ bool LIsInt = BinOp->getLHS()->EvaluateAsInt(LResult, S.Context);
+ bool RIsInt = BinOp->getRHS()->EvaluateAsInt(RResult, S.Context);
+
+ if (LIsInt != RIsInt) {
+ BinaryOperatorKind BinOpKind = BinOp->getOpcode();
+
+ if (LIsInt) {
+ if (BinOpKind == BO_Add) {
+ sumOffsets(Offset, LResult, BinOpKind, RIsInt);
+ E = BinOp->getRHS();
+ goto tryAgain;
+ }
+ } else {
+ sumOffsets(Offset, RResult, BinOpKind, RIsInt);
+ E = BinOp->getLHS();
+ goto tryAgain;
+ }
+ }
+ }
+
+ return SLCT_NotALiteral;
+ }
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UnaOp = cast<UnaryOperator>(E);
+ auto ASE = dyn_cast<ArraySubscriptExpr>(UnaOp->getSubExpr());
+ if (UnaOp->getOpcode() == clang::UO_AddrOf && ASE) {
+ llvm::APSInt IndexResult;
+ if (ASE->getRHS()->EvaluateAsInt(IndexResult, S.Context)) {
+ sumOffsets(Offset, IndexResult, BO_Add, /*RHS is int*/ true);
+ E = ASE->getBase();
+ goto tryAgain;
+ }
+ }
+
+ return SLCT_NotALiteral;
+ }
default:
return SLCT_NotALiteral;
@@ -4204,15 +4702,16 @@ checkFormatStringExpr(Sema &S, const Expr *E, ArrayRef<const Expr *> Args,
Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
return llvm::StringSwitch<FormatStringType>(Format->getType()->getName())
- .Case("scanf", FST_Scanf)
- .Cases("printf", "printf0", FST_Printf)
- .Cases("NSString", "CFString", FST_NSString)
- .Case("strftime", FST_Strftime)
- .Case("strfmon", FST_Strfmon)
- .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
- .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
- .Case("os_trace", FST_OSTrace)
- .Default(FST_Unknown);
+ .Case("scanf", FST_Scanf)
+ .Cases("printf", "printf0", FST_Printf)
+ .Cases("NSString", "CFString", FST_NSString)
+ .Case("strftime", FST_Strftime)
+ .Case("strfmon", FST_Strfmon)
+ .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
+ .Case("freebsd_kprintf", FST_FreeBSDKPrintf)
+ .Case("os_trace", FST_OSLog)
+ .Case("os_log", FST_OSLog)
+ .Default(FST_Unknown);
}
/// CheckFormatArguments - Check calls to printf and scanf (and similar
@@ -4262,8 +4761,9 @@ bool Sema::CheckFormatArguments(ArrayRef<const Expr *> Args,
StringLiteralCheckType CT =
checkFormatStringExpr(*this, OrigFormatExpr, Args, HasVAListArg,
format_idx, firstDataArg, Type, CallType,
- /*IsFunctionCall*/true, CheckedVarArgs,
- UncoveredArg);
+ /*IsFunctionCall*/ true, CheckedVarArgs,
+ UncoveredArg,
+ /*no string offset*/ llvm::APSInt(64, false) = 0);
// Generate a diagnostic where an uncovered argument is detected.
if (UncoveredArg.hasUncoveredArg()) {
@@ -4319,8 +4819,9 @@ namespace {
class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
protected:
Sema &S;
- const StringLiteral *FExpr;
+ const FormatStringLiteral *FExpr;
const Expr *OrigFormatExpr;
+ const Sema::FormatStringType FSType;
const unsigned FirstDataArg;
const unsigned NumDataArgs;
const char *Beg; // Start of format string.
@@ -4336,21 +4837,20 @@ protected:
UncoveredArgHandler &UncoveredArg;
public:
- CheckFormatHandler(Sema &s, const StringLiteral *fexpr,
- const Expr *origFormatExpr, unsigned firstDataArg,
+ CheckFormatHandler(Sema &s, const FormatStringLiteral *fexpr,
+ const Expr *origFormatExpr,
+ const Sema::FormatStringType type, unsigned firstDataArg,
unsigned numDataArgs, const char *beg, bool hasVAListArg,
- ArrayRef<const Expr *> Args,
- unsigned formatIdx, bool inFunctionCall,
- Sema::VariadicCallType callType,
+ ArrayRef<const Expr *> Args, unsigned formatIdx,
+ bool inFunctionCall, Sema::VariadicCallType callType,
llvm::SmallBitVector &CheckedVarArgs,
UncoveredArgHandler &UncoveredArg)
- : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
- FirstDataArg(firstDataArg), NumDataArgs(numDataArgs),
- Beg(beg), HasVAListArg(hasVAListArg),
- Args(Args), FormatIdx(formatIdx),
- usesPositionalArgs(false), atFirstArg(true),
- inFunctionCall(inFunctionCall), CallType(callType),
- CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
+ : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr), FSType(type),
+ FirstDataArg(firstDataArg), NumDataArgs(numDataArgs), Beg(beg),
+ HasVAListArg(hasVAListArg), Args(Args), FormatIdx(formatIdx),
+ usesPositionalArgs(false), atFirstArg(true),
+ inFunctionCall(inFunctionCall), CallType(callType),
+ CheckedVarArgs(CheckedVarArgs), UncoveredArg(UncoveredArg) {
CoveredArgs.resize(numDataArgs);
CoveredArgs.reset();
}
@@ -4436,7 +4936,8 @@ getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
}
SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
- return S.getLocationOfStringLiteralByte(FExpr, x - Beg);
+ return FExpr->getLocationOfByte(x - Beg, S.getSourceManager(),
+ S.getLangOpts(), S.Context.getTargetInfo());
}
void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
@@ -4647,16 +5148,16 @@ CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
// hex value.
std::string CodePointStr;
if (!llvm::sys::locale::isPrint(*csStart)) {
- UTF32 CodePoint;
- const UTF8 **B = reinterpret_cast<const UTF8 **>(&csStart);
- const UTF8 *E =
- reinterpret_cast<const UTF8 *>(csStart + csLen);
- ConversionResult Result =
- llvm::convertUTF8Sequence(B, E, &CodePoint, strictConversion);
-
- if (Result != conversionOK) {
+ llvm::UTF32 CodePoint;
+ const llvm::UTF8 **B = reinterpret_cast<const llvm::UTF8 **>(&csStart);
+ const llvm::UTF8 *E =
+ reinterpret_cast<const llvm::UTF8 *>(csStart + csLen);
+ llvm::ConversionResult Result =
+ llvm::convertUTF8Sequence(B, E, &CodePoint, llvm::strictConversion);
+
+ if (Result != llvm::conversionOK) {
unsigned char FirstChar = *csStart;
- CodePoint = (UTF32)FirstChar;
+ CodePoint = (llvm::UTF32)FirstChar;
}
llvm::raw_string_ostream OS(CodePointStr);
@@ -4772,24 +5273,28 @@ void CheckFormatHandler::EmitFormatDiagnostic(
namespace {
class CheckPrintfHandler : public CheckFormatHandler {
- bool ObjCContext;
-
public:
- CheckPrintfHandler(Sema &s, const StringLiteral *fexpr,
- const Expr *origFormatExpr, unsigned firstDataArg,
- unsigned numDataArgs, bool isObjC,
- const char *beg, bool hasVAListArg,
- ArrayRef<const Expr *> Args,
+ CheckPrintfHandler(Sema &s, const FormatStringLiteral *fexpr,
+ const Expr *origFormatExpr,
+ const Sema::FormatStringType type, unsigned firstDataArg,
+ unsigned numDataArgs, bool isObjC, const char *beg,
+ bool hasVAListArg, ArrayRef<const Expr *> Args,
unsigned formatIdx, bool inFunctionCall,
Sema::VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs,
UncoveredArgHandler &UncoveredArg)
- : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
- numDataArgs, beg, hasVAListArg, Args,
- formatIdx, inFunctionCall, CallType, CheckedVarArgs,
- UncoveredArg),
- ObjCContext(isObjC)
- {}
+ : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
+ numDataArgs, beg, hasVAListArg, Args, formatIdx,
+ inFunctionCall, CallType, CheckedVarArgs,
+ UncoveredArg) {}
+
+ bool isObjCContext() const { return FSType == Sema::FST_NSString; }
+
+ /// Returns true if '%@' specifiers are allowed in the format string.
+ bool allowsObjCArg() const {
+ return FSType == Sema::FST_NSString || FSType == Sema::FST_OSLog ||
+ FSType == Sema::FST_OSTrace;
+ }
bool HandleInvalidPrintfConversionSpecifier(
const analyze_printf::PrintfSpecifier &FS,
@@ -5143,11 +5648,54 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
// Check for using an Objective-C specific conversion specifier
// in a non-ObjC literal.
- if (!ObjCContext && CS.isObjCArg()) {
+ if (!allowsObjCArg() && CS.isObjCArg()) {
+ return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
+ specifierLen);
+ }
+
+ // %P can only be used with os_log.
+ if (FSType != Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::PArg) {
return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
specifierLen);
}
+ // %n is not allowed with os_log.
+ if (FSType == Sema::FST_OSLog && CS.getKind() == ConversionSpecifier::nArg) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_os_log_format_narg),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
+
+ return true;
+ }
+
+ // Only scalars are allowed for os_trace.
+ if (FSType == Sema::FST_OSTrace &&
+ (CS.getKind() == ConversionSpecifier::PArg ||
+ CS.getKind() == ConversionSpecifier::sArg ||
+ CS.getKind() == ConversionSpecifier::ObjCObjArg)) {
+ return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
+ specifierLen);
+ }
+
+ // Check for use of public/private annotation outside of os_log().
+ if (FSType != Sema::FST_OSLog) {
+ if (FS.isPublic().isSet()) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
+ << "public",
+ getLocationOfByte(FS.isPublic().getPosition()),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+ if (FS.isPrivate().isSet()) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_annotation)
+ << "private",
+ getLocationOfByte(FS.isPrivate().getPosition()),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+ }
+
// Check for invalid use of field width
if (!FS.hasValidFieldWidth()) {
HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
@@ -5160,6 +5708,15 @@ CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
startSpecifier, specifierLen);
}
+ // Precision is mandatory for %P specifier.
+ if (CS.getKind() == ConversionSpecifier::PArg &&
+ FS.getPrecision().getHowSpecified() == OptionalAmount::NotSpecified) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_P_no_precision),
+ getLocationOfByte(startSpecifier),
+ /*IsStringLocation*/ false,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+
// Check each flag does not conflict with any other component.
if (!FS.hasValidThousandsGroupingPrefix())
HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
@@ -5309,8 +5866,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
using namespace analyze_printf;
// Now type check the data expression that matches the
// format specifier.
- const analyze_printf::ArgType &AT = FS.getArgType(S.Context,
- ObjCContext);
+ const analyze_printf::ArgType &AT = FS.getArgType(S.Context, isObjCContext());
if (!AT.isValid())
return true;
@@ -5365,7 +5921,7 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// If the argument is an integer of some kind, believe the %C and suggest
// a cast instead of changing the conversion specifier.
QualType IntendedTy = ExprTy;
- if (ObjCContext &&
+ if (isObjCContext() &&
FS.getConversionSpecifier().getKind() == ConversionSpecifier::CArg) {
if (ExprTy->isIntegralOrUnscopedEnumerationType() &&
!ExprTy->isCharType()) {
@@ -5406,8 +5962,8 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
// We may be able to offer a FixItHint if it is a supported type.
PrintfSpecifier fixedFS = FS;
- bool success = fixedFS.fixType(IntendedTy, S.getLangOpts(),
- S.Context, ObjCContext);
+ bool success =
+ fixedFS.fixType(IntendedTy, S.getLangOpts(), S.Context, isObjCContext());
if (success) {
// Get the fix string from the fixed format specifier
@@ -5562,20 +6118,19 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
namespace {
class CheckScanfHandler : public CheckFormatHandler {
public:
- CheckScanfHandler(Sema &s, const StringLiteral *fexpr,
- const Expr *origFormatExpr, unsigned firstDataArg,
- unsigned numDataArgs, const char *beg, bool hasVAListArg,
- ArrayRef<const Expr *> Args,
- unsigned formatIdx, bool inFunctionCall,
- Sema::VariadicCallType CallType,
+ CheckScanfHandler(Sema &s, const FormatStringLiteral *fexpr,
+ const Expr *origFormatExpr, Sema::FormatStringType type,
+ unsigned firstDataArg, unsigned numDataArgs,
+ const char *beg, bool hasVAListArg,
+ ArrayRef<const Expr *> Args, unsigned formatIdx,
+ bool inFunctionCall, Sema::VariadicCallType CallType,
llvm::SmallBitVector &CheckedVarArgs,
UncoveredArgHandler &UncoveredArg)
- : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
- numDataArgs, beg, hasVAListArg,
- Args, formatIdx, inFunctionCall, CallType,
- CheckedVarArgs, UncoveredArg)
- {}
-
+ : CheckFormatHandler(s, fexpr, origFormatExpr, type, firstDataArg,
+ numDataArgs, beg, hasVAListArg, Args, formatIdx,
+ inFunctionCall, CallType, CheckedVarArgs,
+ UncoveredArg) {}
+
bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
const char *startSpecifier,
unsigned specifierLen) override;
@@ -5733,7 +6288,7 @@ bool CheckScanfHandler::HandleScanfSpecifier(
return true;
}
-static void CheckFormatString(Sema &S, const StringLiteral *FExpr,
+static void CheckFormatString(Sema &S, const FormatStringLiteral *FExpr,
const Expr *OrigFormatExpr,
ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
@@ -5785,13 +6340,13 @@ static void CheckFormatString(Sema &S, const StringLiteral *FExpr,
}
if (Type == Sema::FST_Printf || Type == Sema::FST_NSString ||
- Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSTrace) {
- CheckPrintfHandler H(S, FExpr, OrigFormatExpr, firstDataArg,
- numDataArgs, (Type == Sema::FST_NSString ||
- Type == Sema::FST_OSTrace),
- Str, HasVAListArg, Args, format_idx,
- inFunctionCall, CallType, CheckedVarArgs,
- UncoveredArg);
+ Type == Sema::FST_FreeBSDKPrintf || Type == Sema::FST_OSLog ||
+ Type == Sema::FST_OSTrace) {
+ CheckPrintfHandler H(
+ S, FExpr, OrigFormatExpr, Type, firstDataArg, numDataArgs,
+ (Type == Sema::FST_NSString || Type == Sema::FST_OSTrace), Str,
+ HasVAListArg, Args, format_idx, inFunctionCall, CallType,
+ CheckedVarArgs, UncoveredArg);
if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
S.getLangOpts(),
@@ -5799,10 +6354,9 @@ static void CheckFormatString(Sema &S, const StringLiteral *FExpr,
Type == Sema::FST_FreeBSDKPrintf))
H.DoneProcessing();
} else if (Type == Sema::FST_Scanf) {
- CheckScanfHandler H(S, FExpr, OrigFormatExpr, firstDataArg, numDataArgs,
- Str, HasVAListArg, Args, format_idx,
- inFunctionCall, CallType, CheckedVarArgs,
- UncoveredArg);
+ CheckScanfHandler H(S, FExpr, OrigFormatExpr, Type, firstDataArg,
+ numDataArgs, Str, HasVAListArg, Args, format_idx,
+ inFunctionCall, CallType, CheckedVarArgs, UncoveredArg);
if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
S.getLangOpts(),
@@ -6118,23 +6672,14 @@ static void emitReplacement(Sema &S, SourceLocation Loc, SourceRange Range,
<< FunctionName;
}
-static bool IsFunctionStdAbs(const FunctionDecl *FDecl) {
+template <std::size_t StrLen>
+static bool IsStdFunction(const FunctionDecl *FDecl,
+ const char (&Str)[StrLen]) {
if (!FDecl)
return false;
-
- if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr("abs"))
- return false;
-
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(FDecl->getDeclContext());
-
- while (ND && ND->isInlineNamespace()) {
- ND = dyn_cast<NamespaceDecl>(ND->getDeclContext());
- }
-
- if (!ND || !ND->getIdentifier() || !ND->getIdentifier()->isStr("std"))
+ if (!FDecl->getIdentifier() || !FDecl->getIdentifier()->isStr(Str))
return false;
-
- if (!isa<TranslationUnitDecl>(ND->getDeclContext()))
+ if (!FDecl->isInStdNamespace())
return false;
return true;
@@ -6142,13 +6687,12 @@ static bool IsFunctionStdAbs(const FunctionDecl *FDecl) {
// Warn when using the wrong abs() function.
void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
- const FunctionDecl *FDecl,
- IdentifierInfo *FnInfo) {
+ const FunctionDecl *FDecl) {
if (Call->getNumArgs() != 1)
return;
unsigned AbsKind = getAbsoluteValueFunctionKind(FDecl);
- bool IsStdAbs = IsFunctionStdAbs(FDecl);
+ bool IsStdAbs = IsStdFunction(FDecl, "abs");
if (AbsKind == 0 && !IsStdAbs)
return;
@@ -6221,6 +6765,69 @@ void Sema::CheckAbsoluteValueFunction(const CallExpr *Call,
Call->getCallee()->getSourceRange(), NewAbsKind, ArgType);
}
+//===--- CHECK: Warn on use of std::max and unsigned zero. ----------------===//
+void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
+ const FunctionDecl *FDecl) {
+ if (!Call || !FDecl) return;
+
+ // Ignore template specializations and macros.
+ if (!ActiveTemplateInstantiations.empty()) return;
+ if (Call->getExprLoc().isMacroID()) return;
+
+  // Only care about the one-template-argument, two-parameter form of std::max.
+ if (Call->getNumArgs() != 2) return;
+ if (!IsStdFunction(FDecl, "max")) return;
+  const auto *ArgList = FDecl->getTemplateSpecializationArgs();
+ if (!ArgList) return;
+ if (ArgList->size() != 1) return;
+
+  // Check that the template type argument is an unsigned integer.
+  const auto &TA = ArgList->get(0);
+ if (TA.getKind() != TemplateArgument::Type) return;
+ QualType ArgType = TA.getAsType();
+ if (!ArgType->isUnsignedIntegerType()) return;
+
+ // See if either argument is a literal zero.
+ auto IsLiteralZeroArg = [](const Expr* E) -> bool {
+ const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(E);
+ if (!MTE) return false;
+ const auto *Num = dyn_cast<IntegerLiteral>(MTE->GetTemporaryExpr());
+ if (!Num) return false;
+ if (Num->getValue() != 0) return false;
+ return true;
+ };
+
+ const Expr *FirstArg = Call->getArg(0);
+ const Expr *SecondArg = Call->getArg(1);
+ const bool IsFirstArgZero = IsLiteralZeroArg(FirstArg);
+ const bool IsSecondArgZero = IsLiteralZeroArg(SecondArg);
+
+ // Only warn when exactly one argument is zero.
+ if (IsFirstArgZero == IsSecondArgZero) return;
+
+ SourceRange FirstRange = FirstArg->getSourceRange();
+ SourceRange SecondRange = SecondArg->getSourceRange();
+
+ SourceRange ZeroRange = IsFirstArgZero ? FirstRange : SecondRange;
+
+ Diag(Call->getExprLoc(), diag::warn_max_unsigned_zero)
+ << IsFirstArgZero << Call->getCallee()->getSourceRange() << ZeroRange;
+
+ // Deduce what parts to remove so that "std::max(0u, foo)" becomes "(foo)".
+ SourceRange RemovalRange;
+ if (IsFirstArgZero) {
+ RemovalRange = SourceRange(FirstRange.getBegin(),
+ SecondRange.getBegin().getLocWithOffset(-1));
+ } else {
+ RemovalRange = SourceRange(getLocForEndOfToken(FirstRange.getEnd()),
+ SecondRange.getEnd());
+ }
+
+ Diag(Call->getExprLoc(), diag::note_remove_max_call)
+ << FixItHint::CreateRemoval(Call->getCallee()->getSourceRange())
+ << FixItHint::CreateRemoval(RemovalRange);
+}
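A hedged sketch of the new warning and its fix-it:

    #include <algorithm>
    unsigned f(unsigned v) {
      return std::max(0u, v); // warning: max of unsigned zero and 'v' is always
    }                         // 'v'; the fix-it rewrites the call to "(v)"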
+
//===--- CHECK: Standard memory functions ---------------------------------===//
/// \brief Takes the expression passed to the size_t parameter of functions
@@ -6320,13 +6927,15 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
// It is possible to have a non-standard definition of memset. Validate
// we have enough arguments, and if not, abort further checking.
- unsigned ExpectedNumArgs = (BId == Builtin::BIstrndup ? 2 : 3);
+ unsigned ExpectedNumArgs =
+ (BId == Builtin::BIstrndup || BId == Builtin::BIbzero ? 2 : 3);
if (Call->getNumArgs() < ExpectedNumArgs)
return;
- unsigned LastArg = (BId == Builtin::BImemset ||
+ unsigned LastArg = (BId == Builtin::BImemset || BId == Builtin::BIbzero ||
BId == Builtin::BIstrndup ? 1 : 2);
- unsigned LenArg = (BId == Builtin::BIstrndup ? 1 : 2);
+ unsigned LenArg =
+ (BId == Builtin::BIbzero || BId == Builtin::BIstrndup ? 1 : 2);
const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
if (CheckMemorySizeofForComparison(*this, LenExpr, FnName,
@@ -6338,6 +6947,13 @@ void Sema::CheckMemaccessArguments(const CallExpr *Call,
const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
llvm::FoldingSetNodeID SizeOfArgID;
+  // Although widely used, 'bzero' is not a standard function. Be stricter
+  // about the argument types before emitting diagnostics, and only allow the
+  // form bzero(ptr, sizeof(...)).
+ QualType FirstArgTy = Call->getArg(0)->IgnoreParenImpCasts()->getType();
+ if (BId == Builtin::BIbzero && !FirstArgTy->getAs<PointerType>())
+ return;
+
for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();
@@ -7960,6 +8576,24 @@ bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
return false;
// White-list bool bitfields.
+ QualType BitfieldType = Bitfield->getType();
+ if (BitfieldType->isBooleanType())
+ return false;
+
+ if (BitfieldType->isEnumeralType()) {
+ EnumDecl *BitfieldEnumDecl = BitfieldType->getAs<EnumType>()->getDecl();
+    // If the underlying enum type was not explicitly specified as an unsigned
+    // type and the enum contains only positive values, MSVC++ will store it
+    // as a signed type, causing an inconsistency.
+ if (S.getLangOpts().CPlusPlus11 &&
+ !BitfieldEnumDecl->getIntegerTypeSourceInfo() &&
+ BitfieldEnumDecl->getNumPositiveBits() > 0 &&
+ BitfieldEnumDecl->getNumNegativeBits() == 0) {
+ S.Diag(InitLoc, diag::warn_no_underlying_type_specified_for_enum_bitfield)
+ << BitfieldEnumDecl->getNameAsString();
+ }
+ }
+
-  if (Bitfield->getType()->isBooleanType())
-    return false;
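A hedged sketch of C++11 code the new enum-bitfield check flags:

    enum E { X, Y, Z };             // no explicit underlying type
    struct S { E Field : 2; };
    void set(S &s) { s.Field = Z; } // warns: under the Microsoft ABI, E is
                                    // stored signed, so 2 bits hold only -2..1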
@@ -7979,18 +8613,17 @@ bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
unsigned OriginalWidth = Value.getBitWidth();
unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
- if (Value.isSigned() && Value.isNegative())
+ if (!Value.isSigned() || Value.isNegative())
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
- if (UO->getOpcode() == UO_Minus)
- if (isa<IntegerLiteral>(UO->getSubExpr()))
- OriginalWidth = Value.getMinSignedBits();
+ if (UO->getOpcode() == UO_Minus || UO->getOpcode() == UO_Not)
+ OriginalWidth = Value.getMinSignedBits();
if (OriginalWidth <= FieldWidth)
return false;
// Compute the value which the bitfield will contain.
llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
- TruncatedValue.setIsSigned(Bitfield->getType()->isSignedIntegerType());
+ TruncatedValue.setIsSigned(BitfieldType->isSignedIntegerType());
// Check whether the stored value is equal to the original value.
TruncatedValue = TruncatedValue.extend(OriginalWidth);
@@ -8515,6 +9148,8 @@ void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
DiagnoseNullConversion(S, E, T, CC);
+ S.DiscardMisalignedMemberAddress(Target, E);
+
if (!Source->isIntegerType() || !Target->isIntegerType())
return;
@@ -8776,25 +9411,19 @@ void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
} // end anonymous namespace
-static bool checkOpenCLEnqueueLocalSizeArgs(Sema &S, CallExpr *TheCall,
- unsigned Start, unsigned End) {
- bool IllegalParams = false;
- for (unsigned I = Start; I <= End; ++I) {
- QualType Ty = TheCall->getArg(I)->getType();
- // Taking into account implicit conversions,
- // allow any integer within 32 bits range
- if (!Ty->isIntegerType() ||
- S.Context.getTypeSizeInChars(Ty).getQuantity() > 4) {
- S.Diag(TheCall->getArg(I)->getLocStart(),
- diag::err_opencl_enqueue_kernel_invalid_local_size_type);
- IllegalParams = true;
- }
- // Potentially emit standard warnings for implicit conversions if enabled
- // using -Wconversion.
- CheckImplicitConversion(S, TheCall->getArg(I), S.Context.UnsignedIntTy,
- TheCall->getArg(I)->getLocStart());
+/// Diagnose a non-integer argument, and emit -Wconversion warnings for any
+/// valid implicit conversion to the expected integer type.
+static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E, const QualType &IntT) {
+  // Taking into account implicit conversions, allow any integer type.
+ if (!E->getType()->isIntegerType()) {
+ S.Diag(E->getLocStart(),
+ diag::err_opencl_enqueue_kernel_invalid_local_size_type);
+ return true;
}
- return IllegalParams;
+ // Potentially emit standard warnings for implicit conversions if enabled
+ // using -Wconversion.
+ CheckImplicitConversion(S, E, IntT, E->getLocStart());
+ return false;
}
// Helper function for Sema::DiagnoseAlwaysNonNullPointer.
@@ -9585,6 +10214,7 @@ void Sema::CheckCompletedExpr(Expr *E, SourceLocation CheckLoc,
CheckUnsequencedOperations(E);
if (!IsConstexpr && !E->isValueDependent())
CheckForIntOverflow(E);
+ DiagnoseMisalignedMembers();
}
void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
@@ -9695,6 +10325,19 @@ bool Sema::CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
return HasInvalidParm;
}
+/// A helper function to get the alignment of a Decl referred to by DeclRefExpr
+/// or MemberExpr.
+static CharUnits getDeclAlign(Expr *E, CharUnits TypeAlign,
+ ASTContext &Context) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
+ return Context.getDeclAlign(DRE->getDecl());
+
+ if (const auto *ME = dyn_cast<MemberExpr>(E))
+ return Context.getDeclAlign(ME->getMemberDecl());
+
+ return TypeAlign;
+}
+
/// CheckCastAlign - Implements -Wcast-align, which warns when a
/// pointer cast increases the alignment requirements.
void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
@@ -9729,6 +10372,15 @@ void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
if (SrcPointee->isIncompleteType()) return;
CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
+
+ if (auto *CE = dyn_cast<CastExpr>(Op)) {
+ if (CE->getCastKind() == CK_ArrayToPointerDecay)
+ SrcAlign = getDeclAlign(CE->getSubExpr(), SrcAlign, Context);
+ } else if (auto *UO = dyn_cast<UnaryOperator>(Op)) {
+ if (UO->getOpcode() == UO_AddrOf)
+ SrcAlign = getDeclAlign(UO->getSubExpr(), SrcAlign, Context);
+ }
+
if (SrcAlign >= DestAlign) return;
Diag(TRange.getBegin(), diag::warn_cast_align)
@@ -11130,3 +11782,151 @@ void Sema::CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
<< ArgumentExpr->getSourceRange()
<< TypeTagExpr->getSourceRange();
}
+
+void Sema::AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
+ CharUnits Alignment) {
+ MisalignedMembers.emplace_back(E, RD, MD, Alignment);
+}
+
+void Sema::DiagnoseMisalignedMembers() {
+ for (MisalignedMember &m : MisalignedMembers) {
+ const NamedDecl *ND = m.RD;
+ if (ND->getName().empty()) {
+ if (const TypedefNameDecl *TD = m.RD->getTypedefNameForAnonDecl())
+ ND = TD;
+ }
+ Diag(m.E->getLocStart(), diag::warn_taking_address_of_packed_member)
+ << m.MD << ND << m.E->getSourceRange();
+ }
+ MisalignedMembers.clear();
+}
+
+void Sema::DiscardMisalignedMemberAddress(const Type *T, Expr *E) {
+ E = E->IgnoreParens();
+ if (!T->isPointerType() && !T->isIntegerType())
+ return;
+ if (isa<UnaryOperator>(E) &&
+ cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf) {
+ auto *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ if (isa<MemberExpr>(Op)) {
+ auto MA = std::find(MisalignedMembers.begin(), MisalignedMembers.end(),
+ MisalignedMember(Op));
+ if (MA != MisalignedMembers.end() &&
+ (T->isIntegerType() ||
+ (T->isPointerType() &&
+ Context.getTypeAlignInChars(T->getPointeeType()) <= MA->Alignment)))
+ MisalignedMembers.erase(MA);
+ }
+ }
+}
+
+void Sema::RefersToMemberWithReducedAlignment(
+ Expr *E,
+ llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
+ Action) {
+ const auto *ME = dyn_cast<MemberExpr>(E);
+ if (!ME)
+ return;
+
+ // For a chain of MemberExpr like "a.b.c.d" this list
+ // will keep FieldDecl's like [d, c, b].
+ SmallVector<FieldDecl *, 4> ReverseMemberChain;
+ const MemberExpr *TopME = nullptr;
+ bool AnyIsPacked = false;
+ do {
+ QualType BaseType = ME->getBase()->getType();
+ if (ME->isArrow())
+ BaseType = BaseType->getPointeeType();
+ RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl();
+
+ ValueDecl *MD = ME->getMemberDecl();
+ auto *FD = dyn_cast<FieldDecl>(MD);
+ // We do not care about non-data members.
+ if (!FD || FD->isInvalidDecl())
+ return;
+
+ AnyIsPacked =
+ AnyIsPacked || (RD->hasAttr<PackedAttr>() || MD->hasAttr<PackedAttr>());
+ ReverseMemberChain.push_back(FD);
+
+ TopME = ME;
+ ME = dyn_cast<MemberExpr>(ME->getBase()->IgnoreParens());
+ } while (ME);
+ assert(TopME && "We did not compute a topmost MemberExpr!");
+
+ // Not in the scope of this diagnostic.
+ if (!AnyIsPacked)
+ return;
+
+ const Expr *TopBase = TopME->getBase()->IgnoreParenImpCasts();
+ const auto *DRE = dyn_cast<DeclRefExpr>(TopBase);
+ // TODO: The innermost base of the member expression may be too complicated.
+ // For now, just disregard these cases. This is left for future
+ // improvement.
+ if (!DRE && !isa<CXXThisExpr>(TopBase))
+ return;
+
+ // Alignment expected by the whole expression.
+ CharUnits ExpectedAlignment = Context.getTypeAlignInChars(E->getType());
+
+ // No need to do anything else with this case.
+ if (ExpectedAlignment.isOne())
+ return;
+
+ // Synthesize the offset of the whole access.
+ CharUnits Offset;
+ for (auto I = ReverseMemberChain.rbegin(); I != ReverseMemberChain.rend();
+ I++) {
+ Offset += Context.toCharUnitsFromBits(Context.getFieldOffset(*I));
+ }
+
+ // Compute the CompleteObjectAlignment as the alignment of the whole chain.
+ CharUnits CompleteObjectAlignment = Context.getTypeAlignInChars(
+ ReverseMemberChain.back()->getParent()->getTypeForDecl());
+
+ // The base expression of the innermost MemberExpr may give
+ // stronger guarantees than the class containing the member.
+ if (DRE && !TopME->isArrow()) {
+ const ValueDecl *VD = DRE->getDecl();
+ if (!VD->getType()->isReferenceType())
+ CompleteObjectAlignment =
+ std::max(CompleteObjectAlignment, Context.getDeclAlign(VD));
+ }
+
+ // Check if the synthesized offset fulfills the alignment.
+ if (Offset % ExpectedAlignment != 0 ||
+ // It may fulfill the offset, but the effective alignment may still be
+ // lower than the expected expression alignment.
+ CompleteObjectAlignment < ExpectedAlignment) {
+ // If this happens, we want to determine a sensible culprit.
+ // Intuitively, walking the chain of member expressions from right to
+ // left, we start with the alignment required by the field type, but some
+ // packed attribute in that chain has reduced the alignment. It may happen
+ // that another packed structure increases it again, but if we are here,
+ // any such increase has not been enough. So pointing at the first
+ // FieldDecl that is either packed itself or whose RecordDecl is packed
+ // seems reasonable.
+ FieldDecl *FD = nullptr;
+ CharUnits Alignment;
+ for (FieldDecl *FDI : ReverseMemberChain) {
+ if (FDI->hasAttr<PackedAttr>() ||
+ FDI->getParent()->hasAttr<PackedAttr>()) {
+ FD = FDI;
+ Alignment = std::min(
+ Context.getTypeAlignInChars(FD->getType()),
+ Context.getTypeAlignInChars(FD->getParent()->getTypeForDecl()));
+ break;
+ }
+ }
+ assert(FD && "We did not find a packed FieldDecl!");
+ Action(E, FD->getParent(), FD, Alignment);
+ }
+}
+
+void Sema::CheckAddressOfPackedMember(Expr *rhs) {
+ using namespace std::placeholders;
+ RefersToMemberWithReducedAlignment(
+ rhs, std::bind(&Sema::AddPotentialMisalignedMembers, std::ref(*this), _1,
+ _2, _3, _4));
+}
+
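Taken together, the routines above implement -Waddress-of-packed-member; DiscardMisalignedMemberAddress removes a pending diagnostic when the address is converted to a harmless destination type. A sketch on hypothetical user code:

    struct __attribute__((packed)) S {
      char c;
      int i;                    // offset 1, but 'int' requires alignment 4
    };

    void f(S *s) {
      int *p = &s->i;           // warning: taking address of packed member
      char *q = (char *)&s->i;  // no warning: char needs alignment 1
      unsigned long b = (unsigned long)&s->i;  // no warning: integer dest
      (void)p; (void)q; (void)b;
    }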
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index 36babc4bc0cd..d76bde574677 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -2162,6 +2162,60 @@ static std::string formatObjCParamQualifiers(unsigned ObjCQuals,
return Result;
}
+/// \brief Tries to find the most appropriate type location for an Objective-C
+/// block placeholder.
+///
+/// This function ignores things like typedefs and qualifiers in order to
+/// present the most relevant and accurate block placeholders in code completion
+/// results.
+static void findTypeLocationForBlockDecl(const TypeSourceInfo *TSInfo,
+ FunctionTypeLoc &Block,
+ FunctionProtoTypeLoc &BlockProto,
+ bool SuppressBlock = false) {
+ if (!TSInfo)
+ return;
+ TypeLoc TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
+ while (true) {
+ // Look through typedefs.
+ if (!SuppressBlock) {
+ if (TypedefTypeLoc TypedefTL = TL.getAs<TypedefTypeLoc>()) {
+ if (TypeSourceInfo *InnerTSInfo =
+ TypedefTL.getTypedefNameDecl()->getTypeSourceInfo()) {
+ TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ }
+
+ // Look through qualified types
+ if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) {
+ TL = QualifiedTL.getUnqualifiedLoc();
+ continue;
+ }
+
+ if (AttributedTypeLoc AttrTL = TL.getAs<AttributedTypeLoc>()) {
+ TL = AttrTL.getModifiedLoc();
+ continue;
+ }
+ }
+
+ // Try to get the function prototype behind the block pointer type,
+ // then we're done.
+ if (BlockPointerTypeLoc BlockPtr = TL.getAs<BlockPointerTypeLoc>()) {
+ TL = BlockPtr.getPointeeLoc().IgnoreParens();
+ Block = TL.getAs<FunctionTypeLoc>();
+ BlockProto = TL.getAs<FunctionProtoTypeLoc>();
+ }
+ break;
+ }
+}
+
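For example (hypothetical source compiled with -fblocks): the walk above looks through the typedef and the qualifier so the completion placeholder can be rendered from the prototype that still carries the parameter name:

    typedef int (^Callback)(int delta);  // FunctionProtoTypeLoc lives here

    void schedule(const Callback cb);    // placeholder for 'cb' is rendered
                                         // roughly as ^int(int delta)cb,
                                         // not as the sugared 'Callback'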
+static std::string
+formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
+ FunctionTypeLoc &Block, FunctionProtoTypeLoc &BlockProto,
+ bool SuppressBlockName = false,
+ bool SuppressBlock = false,
+ Optional<ArrayRef<QualType>> ObjCSubsts = None);
+
static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
const ParmVarDecl *Param,
bool SuppressName = false,
@@ -2192,47 +2246,13 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
}
return Result;
}
-
+
// The argument for a block pointer parameter is a block literal with
// the appropriate type.
FunctionTypeLoc Block;
FunctionProtoTypeLoc BlockProto;
- TypeLoc TL;
- if (TypeSourceInfo *TSInfo = Param->getTypeSourceInfo()) {
- TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
- while (true) {
- // Look through typedefs.
- if (!SuppressBlock) {
- if (TypedefTypeLoc TypedefTL = TL.getAs<TypedefTypeLoc>()) {
- if (TypeSourceInfo *InnerTSInfo =
- TypedefTL.getTypedefNameDecl()->getTypeSourceInfo()) {
- TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
- continue;
- }
- }
-
- // Look through qualified types
- if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>()) {
- TL = QualifiedTL.getUnqualifiedLoc();
- continue;
- }
-
- if (AttributedTypeLoc AttrTL = TL.getAs<AttributedTypeLoc>()) {
- TL = AttrTL.getModifiedLoc();
- continue;
- }
- }
-
- // Try to get the function prototype behind the block pointer type,
- // then we're done.
- if (BlockPointerTypeLoc BlockPtr = TL.getAs<BlockPointerTypeLoc>()) {
- TL = BlockPtr.getPointeeLoc().IgnoreParens();
- Block = TL.getAs<FunctionTypeLoc>();
- BlockProto = TL.getAs<FunctionProtoTypeLoc>();
- }
- break;
- }
- }
+ findTypeLocationForBlockDecl(Param->getTypeSourceInfo(), Block, BlockProto,
+ SuppressBlock);
if (!Block) {
// We were unable to find a FunctionProtoTypeLoc with parameter names
@@ -2244,9 +2264,13 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
QualType Type = Param->getType().getUnqualifiedType();
if (ObjCMethodParam) {
- Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier(),
- Type);
- Result += Type.getAsString(Policy) + Result + ")";
+ Result = Type.getAsString(Policy);
+ std::string Quals =
+ formatObjCParamQualifiers(Param->getObjCDeclQualifier(), Type);
+ if (!Quals.empty())
+ Result = "(" + Quals + " " + Result + ")";
+ if (Result.back() != ')')
+ Result += " ";
if (Param->getIdentifier())
Result += Param->getIdentifier()->getName();
} else {
@@ -2255,15 +2279,34 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
return Result;
}
-
+
// We have the function prototype behind the block pointer type, as it was
// written in the source.
+ return formatBlockPlaceholder(Policy, Param, Block, BlockProto,
+ /*SuppressBlockName=*/false, SuppressBlock,
+ ObjCSubsts);
+}
+
+/// \brief Returns a placeholder string that corresponds to an Objective-C block
+/// declaration.
+///
+/// \param BlockDecl A declaration with an Objective-C block type.
+///
+/// \param Block The most relevant type location for that block type.
+///
+/// \param SuppressBlockName Determines whether the name of the block
+/// declaration is included in the resulting string.
+static std::string
+formatBlockPlaceholder(const PrintingPolicy &Policy, const NamedDecl *BlockDecl,
+ FunctionTypeLoc &Block, FunctionProtoTypeLoc &BlockProto,
+ bool SuppressBlockName, bool SuppressBlock,
+ Optional<ArrayRef<QualType>> ObjCSubsts) {
std::string Result;
QualType ResultType = Block.getTypePtr()->getReturnType();
if (ObjCSubsts)
- ResultType = ResultType.substObjCTypeArgs(Param->getASTContext(),
- *ObjCSubsts,
- ObjCSubstitutionContext::Result);
+ ResultType =
+ ResultType.substObjCTypeArgs(BlockDecl->getASTContext(), *ObjCSubsts,
+ ObjCSubstitutionContext::Result);
if (!ResultType->isVoidType() || SuppressBlock)
ResultType.getAsStringInternal(Result, Policy);
@@ -2281,31 +2324,30 @@ static std::string FormatFunctionParameter(const PrintingPolicy &Policy,
Params += ", ";
Params += FormatFunctionParameter(Policy, Block.getParam(I),
/*SuppressName=*/false,
- /*SuppressBlock=*/true,
- ObjCSubsts);
+ /*SuppressBlock=*/true, ObjCSubsts);
if (I == N - 1 && BlockProto.getTypePtr()->isVariadic())
Params += ", ...";
}
Params += ")";
}
-
+
if (SuppressBlock) {
// Format as a parameter.
Result = Result + " (^";
- if (Param->getIdentifier())
- Result += Param->getIdentifier()->getName();
+ if (!SuppressBlockName && BlockDecl->getIdentifier())
+ Result += BlockDecl->getIdentifier()->getName();
Result += ")";
Result += Params;
} else {
// Format as a block literal argument.
Result = '^' + Result;
Result += Params;
-
- if (Param->getIdentifier())
- Result += Param->getIdentifier()->getName();
+
+ if (!SuppressBlockName && BlockDecl->getIdentifier())
+ Result += BlockDecl->getIdentifier()->getName();
}
-
+
return Result;
}
@@ -3062,6 +3104,7 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
return CXCursor_ClassTemplatePartialSpecialization;
case Decl::UsingDirective: return CXCursor_UsingDirective;
case Decl::StaticAssert: return CXCursor_StaticAssert;
+ case Decl::Friend: return CXCursor_FriendDecl;
case Decl::TranslationUnit: return CXCursor_TranslationUnit;
case Decl::Using:
@@ -3573,82 +3616,204 @@ static ObjCContainerDecl *getContainerDef(ObjCContainerDecl *Container) {
return Container;
}
-static void AddObjCProperties(const CodeCompletionContext &CCContext,
- ObjCContainerDecl *Container,
- bool AllowCategories,
- bool AllowNullaryMethods,
- DeclContext *CurContext,
- AddedPropertiesSet &AddedProperties,
- ResultBuilder &Results) {
+/// \brief Adds a block invocation code completion result for the given block
+/// declaration \p BD.
+static void AddObjCBlockCall(ASTContext &Context, const PrintingPolicy &Policy,
+ CodeCompletionBuilder &Builder,
+ const NamedDecl *BD,
+ const FunctionTypeLoc &BlockLoc,
+ const FunctionProtoTypeLoc &BlockProtoLoc) {
+ Builder.AddResultTypeChunk(
+ GetCompletionTypeString(BlockLoc.getReturnLoc().getType(), Context,
+ Policy, Builder.getAllocator()));
+
+ AddTypedNameChunk(Context, Policy, BD, Builder);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+
+ if (BlockProtoLoc && BlockProtoLoc.getTypePtr()->isVariadic()) {
+ Builder.AddPlaceholderChunk("...");
+ } else {
+ for (unsigned I = 0, N = BlockLoc.getNumParams(); I != N; ++I) {
+ if (I)
+ Builder.AddChunk(CodeCompletionString::CK_Comma);
+
+ // Format the placeholder string.
+ std::string PlaceholderStr =
+ FormatFunctionParameter(Policy, BlockLoc.getParam(I));
+
+ if (I == N - 1 && BlockProtoLoc &&
+ BlockProtoLoc.getTypePtr()->isVariadic())
+ PlaceholderStr += ", ...";
+
+ // Add the placeholder string.
+ Builder.AddPlaceholderChunk(
+ Builder.getAllocator().CopyString(PlaceholderStr));
+ }
+ }
+
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+}
+
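For orientation: given a hypothetical Objective-C block property such as 'void (^handler)(int code);', the builder above emits an invocation completion of roughly 'handler(<#int code#>)'; the setter completion added further down renders as roughly 'handler = <#^(int code)#>' when the property is writable.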
+static void AddObjCProperties(
+ const CodeCompletionContext &CCContext, ObjCContainerDecl *Container,
+ bool AllowCategories, bool AllowNullaryMethods, DeclContext *CurContext,
+ AddedPropertiesSet &AddedProperties, ResultBuilder &Results,
+ bool IsBaseExprStatement = false, bool IsClassProperty = false) {
typedef CodeCompletionResult Result;
// Retrieve the definition.
Container = getContainerDef(Container);
// Add properties in this container.
- for (const auto *P : Container->instance_properties())
- if (AddedProperties.insert(P->getIdentifier()).second)
+ const auto AddProperty = [&](const ObjCPropertyDecl *P) {
+ if (!AddedProperties.insert(P->getIdentifier()).second)
+ return;
+
+ // FIXME: Provide block invocation completion for non-statement
+ // expressions.
+ if (!P->getType().getTypePtr()->isBlockPointerType() ||
+ !IsBaseExprStatement) {
+ Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
+ CurContext);
+ return;
+ }
+
+ // Block setter and invocation completion is provided only when we are able
+ // to find the FunctionProtoTypeLoc with parameter names for the block.
+ FunctionTypeLoc BlockLoc;
+ FunctionProtoTypeLoc BlockProtoLoc;
+ findTypeLocationForBlockDecl(P->getTypeSourceInfo(), BlockLoc,
+ BlockProtoLoc);
+ if (!BlockLoc) {
Results.MaybeAddResult(Result(P, Results.getBasePriority(P), nullptr),
CurContext);
+ return;
+ }
- // Add nullary methods
+ // The default completion result for block properties should be the block
+ // invocation completion when the base expression is a statement.
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ AddObjCBlockCall(Container->getASTContext(),
+ getCompletionPrintingPolicy(Results.getSema()), Builder, P,
+ BlockLoc, BlockProtoLoc);
+ Results.MaybeAddResult(
+ Result(Builder.TakeString(), P, Results.getBasePriority(P)),
+ CurContext);
+
+ // Provide additional block setter completion iff the base expression is a
+ // statement and the block property is mutable.
+ if (!P->isReadOnly()) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ AddResultTypeChunk(Container->getASTContext(),
+ getCompletionPrintingPolicy(Results.getSema()), P,
+ CCContext.getBaseType(), Builder);
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(P->getName()));
+ Builder.AddChunk(CodeCompletionString::CK_Equal);
+
+ std::string PlaceholderStr = formatBlockPlaceholder(
+ getCompletionPrintingPolicy(Results.getSema()), P, BlockLoc,
+ BlockProtoLoc, /*SuppressBlockName=*/true);
+ // Add the placeholder string.
+ Builder.AddPlaceholderChunk(
+ Builder.getAllocator().CopyString(PlaceholderStr));
+
+ Results.MaybeAddResult(
+ Result(Builder.TakeString(), P,
+ Results.getBasePriority(P) + CCD_BlockPropertySetter),
+ CurContext);
+ }
+ };
+
+ if (IsClassProperty) {
+ for (const auto *P : Container->class_properties())
+ AddProperty(P);
+ } else {
+ for (const auto *P : Container->instance_properties())
+ AddProperty(P);
+ }
+
+ // Add nullary methods or implicit class properties
if (AllowNullaryMethods) {
ASTContext &Context = Container->getASTContext();
PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
- for (auto *M : Container->methods()) {
- if (M->getSelector().isUnarySelector())
- if (IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0))
- if (AddedProperties.insert(Name).second) {
- CodeCompletionBuilder Builder(Results.getAllocator(),
- Results.getCodeCompletionTUInfo());
- AddResultTypeChunk(Context, Policy, M, CCContext.getBaseType(),
- Builder);
- Builder.AddTypedTextChunk(
- Results.getAllocator().CopyString(Name->getName()));
-
- Results.MaybeAddResult(Result(Builder.TakeString(), M,
- CCP_MemberDeclaration + CCD_MethodAsProperty),
- CurContext);
- }
+ // Adds a method result
+ const auto AddMethod = [&](const ObjCMethodDecl *M) {
+ IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0);
+ if (!Name)
+ return;
+ if (!AddedProperties.insert(Name).second)
+ return;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ AddResultTypeChunk(Context, Policy, M, CCContext.getBaseType(), Builder);
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(Name->getName()));
+ Results.MaybeAddResult(
+ Result(Builder.TakeString(), M,
+ CCP_MemberDeclaration + CCD_MethodAsProperty),
+ CurContext);
+ };
+
+ if (IsClassProperty) {
+ for (const auto *M : Container->methods()) {
+ // Gather the class methods that can be used as implicit property
+ // getters. Methods with arguments or methods that return void aren't
+ // added to the results, as they can't be used as getters.
+ if (!M->getSelector().isUnarySelector() ||
+ M->getReturnType()->isVoidType() || M->isInstanceMethod())
+ continue;
+ AddMethod(M);
+ }
+ } else {
+ for (auto *M : Container->methods()) {
+ if (M->getSelector().isUnarySelector())
+ AddMethod(M);
+ }
}
}
-
// Add properties in referenced protocols.
if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
for (auto *P : Protocol->protocols())
AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
- CurContext, AddedProperties, Results);
+ CurContext, AddedProperties, Results,
+ IsBaseExprStatement, IsClassProperty);
} else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
if (AllowCategories) {
// Look through categories.
for (auto *Cat : IFace->known_categories())
AddObjCProperties(CCContext, Cat, AllowCategories, AllowNullaryMethods,
- CurContext, AddedProperties, Results);
+ CurContext, AddedProperties, Results,
+ IsBaseExprStatement, IsClassProperty);
}
// Look through protocols.
for (auto *I : IFace->all_referenced_protocols())
AddObjCProperties(CCContext, I, AllowCategories, AllowNullaryMethods,
- CurContext, AddedProperties, Results);
-
+ CurContext, AddedProperties, Results,
+ IsBaseExprStatement, IsClassProperty);
+
// Look in the superclass.
if (IFace->getSuperClass())
AddObjCProperties(CCContext, IFace->getSuperClass(), AllowCategories,
- AllowNullaryMethods, CurContext,
- AddedProperties, Results);
+ AllowNullaryMethods, CurContext, AddedProperties,
+ Results, IsBaseExprStatement, IsClassProperty);
} else if (const ObjCCategoryDecl *Category
= dyn_cast<ObjCCategoryDecl>(Container)) {
// Look through protocols.
for (auto *P : Category->protocols())
AddObjCProperties(CCContext, P, AllowCategories, AllowNullaryMethods,
- CurContext, AddedProperties, Results);
+ CurContext, AddedProperties, Results,
+ IsBaseExprStatement, IsClassProperty);
}
}
void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
- SourceLocation OpLoc,
- bool IsArrow) {
+ SourceLocation OpLoc, bool IsArrow,
+ bool IsBaseExprStatement) {
if (!Base || !CodeCompleter)
return;
@@ -3720,22 +3885,24 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Results.AddResult(Result("template"));
}
}
- } else if (!IsArrow && BaseType->getAsObjCInterfacePointerType()) {
+ } else if (!IsArrow && BaseType->isObjCObjectPointerType()) {
// Objective-C property reference.
AddedPropertiesSet AddedProperties;
-
- // Add property results based on our interface.
- const ObjCObjectPointerType *ObjCPtr
- = BaseType->getAsObjCInterfacePointerType();
- assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
- AddObjCProperties(CCContext, ObjCPtr->getInterfaceDecl(), true,
- /*AllowNullaryMethods=*/true, CurContext,
- AddedProperties, Results);
-
+
+ if (const ObjCObjectPointerType *ObjCPtr =
+ BaseType->getAsObjCInterfacePointerType()) {
+ // Add property results based on our interface.
+ assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
+ AddObjCProperties(CCContext, ObjCPtr->getInterfaceDecl(), true,
+ /*AllowNullaryMethods=*/true, CurContext,
+ AddedProperties, Results, IsBaseExprStatement);
+ }
+
// Add properties from the protocols in a qualified interface.
- for (auto *I : ObjCPtr->quals())
+ for (auto *I : BaseType->getAs<ObjCObjectPointerType>()->quals())
AddObjCProperties(CCContext, I, true, /*AllowNullaryMethods=*/true,
- CurContext, AddedProperties, Results);
+ CurContext, AddedProperties, Results,
+ IsBaseExprStatement);
} else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
(!IsArrow && BaseType->isObjCObjectType())) {
// Objective-C instance variable access.
@@ -3765,6 +3932,30 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
Results.data(),Results.size());
}
+void Sema::CodeCompleteObjCClassPropertyRefExpr(Scope *S,
+ IdentifierInfo &ClassName,
+ SourceLocation ClassNameLoc,
+ bool IsBaseExprStatement) {
+ IdentifierInfo *ClassNamePtr = &ClassName;
+ ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(ClassNamePtr, ClassNameLoc);
+ if (!IFace)
+ return;
+ CodeCompletionContext CCContext(
+ CodeCompletionContext::CCC_ObjCPropertyAccess);
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(), CCContext,
+ &ResultBuilder::IsMember);
+ Results.EnterNewScope();
+ AddedPropertiesSet AddedProperties;
+ AddObjCProperties(CCContext, IFace, true,
+ /*AllowNullaryMethods=*/true, CurContext, AddedProperties,
+ Results, IsBaseExprStatement,
+ /*IsClassProperty=*/true);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
if (!CodeCompleter)
return;
@@ -5952,7 +6143,7 @@ void Sema::CodeCompleteObjCProtocolReferences(
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
- if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ if (CodeCompleter->includeGlobals()) {
Results.EnterNewScope();
// Tell the result set to ignore all of the protocols we have
@@ -5980,7 +6171,7 @@ void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_ObjCProtocolName);
- if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ if (CodeCompleter->includeGlobals()) {
Results.EnterNewScope();
// Add all protocols.
@@ -7008,7 +7199,7 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
.second) {
if (ReturnType.isNull()) {
Builder.AddChunk(CodeCompletionString::CK_LeftParen);
- Builder.AddTextChunk("NSSet *");
+ Builder.AddTextChunk("NSSet<NSString *> *");
Builder.AddChunk(CodeCompletionString::CK_RightParen);
}
diff --git a/lib/Sema/SemaCoroutine.cpp b/lib/Sema/SemaCoroutine.cpp
index c8715fff4159..3109358df464 100644
--- a/lib/Sema/SemaCoroutine.cpp
+++ b/lib/Sema/SemaCoroutine.cpp
@@ -26,15 +26,15 @@ using namespace sema;
static QualType lookupPromiseType(Sema &S, const FunctionProtoType *FnType,
SourceLocation Loc) {
// FIXME: Cache std::coroutine_traits once we've found it.
- NamespaceDecl *Std = S.getStdNamespace();
- if (!Std) {
+ NamespaceDecl *StdExp = S.lookupStdExperimentalNamespace();
+ if (!StdExp) {
S.Diag(Loc, diag::err_implied_std_coroutine_traits_not_found);
return QualType();
}
LookupResult Result(S, &S.PP.getIdentifierTable().get("coroutine_traits"),
Loc, Sema::LookupOrdinaryName);
- if (!S.LookupQualifiedName(Result, Std)) {
+ if (!S.LookupQualifiedName(Result, StdExp)) {
S.Diag(Loc, diag::err_implied_std_coroutine_traits_not_found);
return QualType();
}
@@ -78,7 +78,7 @@ static QualType lookupPromiseType(Sema &S, const FunctionProtoType *FnType,
auto *Promise = R.getAsSingle<TypeDecl>();
if (!Promise) {
S.Diag(Loc, diag::err_implied_std_coroutine_traits_promise_type_not_found)
- << RD;
+ << RD;
return QualType();
}
@@ -86,75 +86,131 @@ static QualType lookupPromiseType(Sema &S, const FunctionProtoType *FnType,
QualType PromiseType = S.Context.getTypeDeclType(Promise);
if (!PromiseType->getAsCXXRecordDecl()) {
// Use the fully-qualified name of the type.
- auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, Std);
+ auto *NNS = NestedNameSpecifier::Create(S.Context, nullptr, StdExp);
NNS = NestedNameSpecifier::Create(S.Context, NNS, false,
CoroTrait.getTypePtr());
PromiseType = S.Context.getElaboratedType(ETK_None, NNS, PromiseType);
S.Diag(Loc, diag::err_implied_std_coroutine_traits_promise_type_not_class)
- << PromiseType;
+ << PromiseType;
return QualType();
}
return PromiseType;
}
-/// Check that this is a context in which a coroutine suspension can appear.
-static FunctionScopeInfo *
-checkCoroutineContext(Sema &S, SourceLocation Loc, StringRef Keyword) {
+static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
+ StringRef Keyword) {
// 'co_await' and 'co_yield' are not permitted in unevaluated operands.
if (S.isUnevaluatedContext()) {
S.Diag(Loc, diag::err_coroutine_unevaluated_context) << Keyword;
- return nullptr;
+ return false;
}
// Any other usage must be within a function.
- // FIXME: Reject a coroutine with a deduced return type.
auto *FD = dyn_cast<FunctionDecl>(S.CurContext);
if (!FD) {
S.Diag(Loc, isa<ObjCMethodDecl>(S.CurContext)
? diag::err_coroutine_objc_method
: diag::err_coroutine_outside_function) << Keyword;
- } else if (isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD)) {
- // Coroutines TS [special]/6:
- // A special member function shall not be a coroutine.
- //
- // FIXME: We assume that this really means that a coroutine cannot
- // be a constructor or destructor.
- S.Diag(Loc, diag::err_coroutine_ctor_dtor)
- << isa<CXXDestructorDecl>(FD) << Keyword;
- } else if (FD->isConstexpr()) {
- S.Diag(Loc, diag::err_coroutine_constexpr) << Keyword;
- } else if (FD->isVariadic()) {
- S.Diag(Loc, diag::err_coroutine_varargs) << Keyword;
- } else {
- auto *ScopeInfo = S.getCurFunction();
- assert(ScopeInfo && "missing function scope for function");
-
- // If we don't have a promise variable, build one now.
- if (!ScopeInfo->CoroutinePromise) {
- QualType T =
- FD->getType()->isDependentType()
- ? S.Context.DependentTy
- : lookupPromiseType(S, FD->getType()->castAs<FunctionProtoType>(),
- Loc);
- if (T.isNull())
- return nullptr;
-
- // Create and default-initialize the promise.
- ScopeInfo->CoroutinePromise =
- VarDecl::Create(S.Context, FD, FD->getLocation(), FD->getLocation(),
- &S.PP.getIdentifierTable().get("__promise"), T,
- S.Context.getTrivialTypeSourceInfo(T, Loc), SC_None);
- S.CheckVariableDeclarationType(ScopeInfo->CoroutinePromise);
- if (!ScopeInfo->CoroutinePromise->isInvalidDecl())
- S.ActOnUninitializedDecl(ScopeInfo->CoroutinePromise, false);
- }
+ return false;
+ }
- return ScopeInfo;
+ // An enumeration for mapping the diagnostic type to the correct diagnostic
+ // selection index.
+ enum InvalidFuncDiag {
+ DiagCtor = 0,
+ DiagDtor,
+ DiagCopyAssign,
+ DiagMoveAssign,
+ DiagMain,
+ DiagConstexpr,
+ DiagAutoRet,
+ DiagVarargs,
+ };
+ bool Diagnosed = false;
+ auto DiagInvalid = [&](InvalidFuncDiag ID) {
+ S.Diag(Loc, diag::err_coroutine_invalid_func_context) << ID << Keyword;
+ Diagnosed = true;
+ return false;
+ };
+
+ // Diagnose when a constructor, destructor, copy/move assignment operator,
+ // or the function 'main' is declared as a coroutine.
+ auto *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && isa<CXXConstructorDecl>(MD))
+ return DiagInvalid(DiagCtor);
+ else if (MD && isa<CXXDestructorDecl>(MD))
+ return DiagInvalid(DiagDtor);
+ else if (MD && MD->isCopyAssignmentOperator())
+ return DiagInvalid(DiagCopyAssign);
+ else if (MD && MD->isMoveAssignmentOperator())
+ return DiagInvalid(DiagMoveAssign);
+ else if (FD->isMain())
+ return DiagInvalid(DiagMain);
+
+ // Emit a diagnostic for each of the following conditions that is not met.
+ if (FD->isConstexpr())
+ DiagInvalid(DiagConstexpr);
+ if (FD->getReturnType()->isUndeducedType())
+ DiagInvalid(DiagAutoRet);
+ if (FD->isVariadic())
+ DiagInvalid(DiagVarargs);
+
+ return !Diagnosed;
+}
+
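Hypothetical user code tripping each new selector (every line below is now rejected with err_coroutine_invalid_func_context):

    struct W {
      W() { co_return; }              // DiagCtor
      ~W() { co_return; }             // DiagDtor
    };
    int main() { co_return; }         // DiagMain
    constexpr int c() { co_return; }  // DiagConstexpr
    auto a() { co_return; }           // DiagAutoRet
    void v(...) { co_return; }        // DiagVarargs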
+/// Check that this is a context in which a coroutine suspension can appear.
+static FunctionScopeInfo *checkCoroutineContext(Sema &S, SourceLocation Loc,
+ StringRef Keyword) {
+ if (!isValidCoroutineContext(S, Loc, Keyword))
+ return nullptr;
+
+ assert(isa<FunctionDecl>(S.CurContext) && "not in a function scope");
+ auto *FD = cast<FunctionDecl>(S.CurContext);
+ auto *ScopeInfo = S.getCurFunction();
+ assert(ScopeInfo && "missing function scope for function");
+
+ // If we don't have a promise variable, build one now.
+ if (!ScopeInfo->CoroutinePromise) {
+ QualType T = FD->getType()->isDependentType()
+ ? S.Context.DependentTy
+ : lookupPromiseType(
+ S, FD->getType()->castAs<FunctionProtoType>(), Loc);
+ if (T.isNull())
+ return nullptr;
+
+ // Create and default-initialize the promise.
+ ScopeInfo->CoroutinePromise =
+ VarDecl::Create(S.Context, FD, FD->getLocation(), FD->getLocation(),
+ &S.PP.getIdentifierTable().get("__promise"), T,
+ S.Context.getTrivialTypeSourceInfo(T, Loc), SC_None);
+ S.CheckVariableDeclarationType(ScopeInfo->CoroutinePromise);
+ if (!ScopeInfo->CoroutinePromise->isInvalidDecl())
+ S.ActOnUninitializedDecl(ScopeInfo->CoroutinePromise, false);
}
- return nullptr;
+ return ScopeInfo;
+}
+
+static Expr *buildBuiltinCall(Sema &S, SourceLocation Loc, Builtin::ID Id,
+ MutableArrayRef<Expr *> CallArgs) {
+ StringRef Name = S.Context.BuiltinInfo.getName(Id);
+ LookupResult R(S, &S.Context.Idents.get(Name), Loc, Sema::LookupOrdinaryName);
+ S.LookupName(R, S.TUScope, /*AllowBuiltinCreation=*/true);
+
+ auto *BuiltInDecl = R.getAsSingle<FunctionDecl>();
+ assert(BuiltInDecl && "failed to find builtin declaration");
+
+ ExprResult DeclRef =
+ S.BuildDeclRefExpr(BuiltInDecl, BuiltInDecl->getType(), VK_LValue, Loc);
+ assert(DeclRef.isUsable() && "Builtin reference cannot fail");
+
+ ExprResult Call =
+ S.ActOnCallExpr(/*Scope=*/nullptr, DeclRef.get(), Loc, CallArgs, Loc);
+
+ assert(!Call.isInvalid() && "Call to builtin cannot fail!");
+ return Call.get();
}
/// Build a call to 'operator co_await' if there is a suitable operator for
@@ -199,7 +255,7 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, SourceLocation Loc,
const StringRef Funcs[] = {"await_ready", "await_suspend", "await_resume"};
for (size_t I = 0, N = llvm::array_lengthof(Funcs); I != N; ++I) {
Expr *Operand = new (S.Context) OpaqueValueExpr(
- Loc, E->getType(), VK_LValue, E->getObjectKind(), E);
+ Loc, E->getType(), VK_LValue, E->getObjectKind(), E);
// FIXME: Pass coroutine handle to await_suspend.
ExprResult Result = buildMemberCall(S, Operand, Loc, Funcs[I], None);
@@ -213,6 +269,11 @@ static ReadySuspendResumeResult buildCoawaitCalls(Sema &S, SourceLocation Loc,
}
ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_await");
+ if (!Coroutine) {
+ CorrectDelayedTyposInExpr(E);
+ return ExprError();
+ }
if (E->getType()->isPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return ExprError();
@@ -222,6 +283,7 @@ ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
ExprResult Awaitable = buildOperatorCoawaitCall(*this, S, Loc, E);
if (Awaitable.isInvalid())
return ExprError();
+
return BuildCoawaitExpr(Loc, Awaitable.get());
}
ExprResult Sema::BuildCoawaitExpr(SourceLocation Loc, Expr *E) {
@@ -275,8 +337,10 @@ static ExprResult buildPromiseCall(Sema &S, FunctionScopeInfo *Coroutine,
ExprResult Sema::ActOnCoyieldExpr(Scope *S, SourceLocation Loc, Expr *E) {
auto *Coroutine = checkCoroutineContext(*this, Loc, "co_yield");
- if (!Coroutine)
+ if (!Coroutine) {
+ CorrectDelayedTyposInExpr(E);
return ExprError();
+ }
// Build yield_value call.
ExprResult Awaitable =
@@ -325,8 +389,14 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
}
StmtResult Sema::ActOnCoreturnStmt(SourceLocation Loc, Expr *E) {
+ auto *Coroutine = checkCoroutineContext(*this, Loc, "co_return");
+ if (!Coroutine) {
+ CorrectDelayedTyposInExpr(E);
+ return StmtError();
+ }
return BuildCoreturnStmt(Loc, E);
}
+
StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E) {
auto *Coroutine = checkCoroutineContext(*this, Loc, "co_return");
if (!Coroutine)
@@ -343,7 +413,7 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E) {
// of scope, we should treat the operand as an xvalue for this overload
// resolution.
ExprResult PC;
- if (E && !E->getType()->isVoidType()) {
+ if (E && (isa<InitListExpr>(E) || !E->getType()->isVoidType())) {
PC = buildPromiseCall(*this, Coroutine, Loc, "return_value", E);
} else {
E = MakeFullDiscardedValueExpr(E).get();
@@ -359,6 +429,141 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E) {
return Res;
}
+static ExprResult buildStdCurrentExceptionCall(Sema &S, SourceLocation Loc) {
+ NamespaceDecl *Std = S.getStdNamespace();
+ if (!Std) {
+ S.Diag(Loc, diag::err_implied_std_current_exception_not_found);
+ return ExprError();
+ }
+ LookupResult Result(S, &S.PP.getIdentifierTable().get("current_exception"),
+ Loc, Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std)) {
+ S.Diag(Loc, diag::err_implied_std_current_exception_not_found);
+ return ExprError();
+ }
+
+ // FIXME: The STL is free to provide more than one overload.
+ FunctionDecl *FD = Result.getAsSingle<FunctionDecl>();
+ if (!FD) {
+ S.Diag(Loc, diag::err_malformed_std_current_exception);
+ return ExprError();
+ }
+ ExprResult Res = S.BuildDeclRefExpr(FD, FD->getType(), VK_LValue, Loc);
+ Res = S.ActOnCallExpr(/*Scope*/ nullptr, Res.get(), Loc, None, Loc);
+ if (Res.isInvalid()) {
+ S.Diag(Loc, diag::err_malformed_std_current_exception);
+ return ExprError();
+ }
+ return Res;
+}
+
+// Find an appropriate delete for the promise.
+static FunctionDecl *findDeleteForPromise(Sema &S, SourceLocation Loc,
+ QualType PromiseType) {
+ FunctionDecl *OperatorDelete = nullptr;
+
+ DeclarationName DeleteName =
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Delete);
+
+ auto *PointeeRD = PromiseType->getAsCXXRecordDecl();
+ assert(PointeeRD && "PromiseType must be a CXXRecordDecl type");
+
+ if (S.FindDeallocationFunction(Loc, PointeeRD, DeleteName, OperatorDelete))
+ return nullptr;
+
+ if (!OperatorDelete) {
+ // Look for a global declaration.
+ const bool CanProvideSize = S.isCompleteType(Loc, PromiseType);
+ const bool Overaligned = false;
+ OperatorDelete = S.FindUsualDeallocationFunction(Loc, CanProvideSize,
+ Overaligned, DeleteName);
+ }
+ S.MarkFunctionReferenced(Loc, OperatorDelete);
+ return OperatorDelete;
+}
+
+// Builds allocation and deallocation for the coroutine. Returns false on
+// failure.
+static bool buildAllocationAndDeallocation(Sema &S, SourceLocation Loc,
+ FunctionScopeInfo *Fn,
+ Expr *&Allocation,
+ Stmt *&Deallocation) {
+ TypeSourceInfo *TInfo = Fn->CoroutinePromise->getTypeSourceInfo();
+ QualType PromiseType = TInfo->getType();
+ if (PromiseType->isDependentType())
+ return true;
+
+ if (S.RequireCompleteType(Loc, PromiseType, diag::err_incomplete_type))
+ return false;
+
+ // FIXME: Add support for get_return_object_on_allocation_failure.
+ // FIXME: Add support for stateful allocators.
+
+ FunctionDecl *OperatorNew = nullptr;
+ FunctionDecl *OperatorDelete = nullptr;
+ FunctionDecl *UnusedResult = nullptr;
+ bool PassAlignment = false;
+
+ S.FindAllocationFunctions(Loc, SourceRange(),
+ /*UseGlobal*/ false, PromiseType,
+ /*isArray*/ false, PassAlignment,
+ /*PlacementArgs*/ None, OperatorNew, UnusedResult);
+
+ OperatorDelete = findDeleteForPromise(S, Loc, PromiseType);
+
+ if (!OperatorDelete || !OperatorNew)
+ return false;
+
+ Expr *FramePtr =
+ buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_frame, {});
+
+ Expr *FrameSize =
+ buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_size, {});
+
+ // Make new call.
+
+ ExprResult NewRef =
+ S.BuildDeclRefExpr(OperatorNew, OperatorNew->getType(), VK_LValue, Loc);
+ if (NewRef.isInvalid())
+ return false;
+
+ ExprResult NewExpr =
+ S.ActOnCallExpr(S.getCurScope(), NewRef.get(), Loc, FrameSize, Loc);
+ if (NewExpr.isInvalid())
+ return false;
+
+ Allocation = NewExpr.get();
+
+ // Make delete call.
+
+ QualType OpDeleteQualType = OperatorDelete->getType();
+
+ ExprResult DeleteRef =
+ S.BuildDeclRefExpr(OperatorDelete, OpDeleteQualType, VK_LValue, Loc);
+ if (DeleteRef.isInvalid())
+ return false;
+
+ Expr *CoroFree =
+ buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_free, {FramePtr});
+
+ SmallVector<Expr *, 2> DeleteArgs{CoroFree};
+
+ // Check if we need to pass the size.
+ const auto *OpDeleteType =
+ OpDeleteQualType.getTypePtr()->getAs<FunctionProtoType>();
+ if (OpDeleteType->getNumParams() > 1)
+ DeleteArgs.push_back(FrameSize);
+
+ ExprResult DeleteExpr =
+ S.ActOnCallExpr(S.getCurScope(), DeleteRef.get(), Loc, DeleteArgs, Loc);
+ if (DeleteExpr.isInvalid())
+ return false;
+
+ Deallocation = DeleteExpr.get();
+
+ return true;
+}
+
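The net effect for a promise type with no class-specific operator new (a sketch of the expressions built above, not literal generated source):

    //   Allocation:    operator new(__builtin_coro_size())
    //   Deallocation:  operator delete(
    //                      __builtin_coro_free(__builtin_coro_frame()))
    // With a sized operator delete, __builtin_coro_size() is appended as
    // the trailing size argument.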
void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
FunctionScopeInfo *Fn = getCurFunction();
assert(Fn && !Fn->CoroutineStmts.empty() && "not a coroutine");
@@ -369,8 +574,8 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
Diag(Fn->FirstReturnLoc, diag::err_return_in_coroutine);
auto *First = Fn->CoroutineStmts[0];
Diag(First->getLocStart(), diag::note_declared_coroutine_here)
- << (isa<CoawaitExpr>(First) ? 0 :
- isa<CoyieldExpr>(First) ? 1 : 2);
+ << (isa<CoawaitExpr>(First) ? 0 :
+ isa<CoyieldExpr>(First) ? 1 : 2);
}
bool AnyCoawaits = false;
@@ -413,15 +618,69 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
if (FinalSuspend.isInvalid())
return FD->setInvalidDecl();
- // FIXME: Perform analysis of set_exception call.
+ // Form and check allocation and deallocation calls.
+ Expr *Allocation = nullptr;
+ Stmt *Deallocation = nullptr;
+ if (!buildAllocationAndDeallocation(*this, Loc, Fn, Allocation, Deallocation))
+ return FD->setInvalidDecl();
- // FIXME: Try to form 'p.return_void();' expression statement to handle
+ // Try to form 'p.return_void();' expression statement to handle
// control flowing off the end of the coroutine.
+ // Also try to form 'p.set_exception(std::current_exception());' to handle
+ // uncaught exceptions.
+ ExprResult SetException;
+ StmtResult Fallthrough;
+ if (Fn->CoroutinePromise &&
+ !Fn->CoroutinePromise->getType()->isDependentType()) {
+ CXXRecordDecl *RD = Fn->CoroutinePromise->getType()->getAsCXXRecordDecl();
+ assert(RD && "Type should have already been checked");
+ // [dcl.fct.def.coroutine]/4
+ // The unqualified-ids 'return_void' and 'return_value' are looked up in
+ // the scope of class P. If both are found, the program is ill-formed.
+ DeclarationName RVoidDN = PP.getIdentifierInfo("return_void");
+ LookupResult RVoidResult(*this, RVoidDN, Loc, Sema::LookupMemberName);
+ const bool HasRVoid = LookupQualifiedName(RVoidResult, RD);
+
+ DeclarationName RValueDN = PP.getIdentifierInfo("return_value");
+ LookupResult RValueResult(*this, RValueDN, Loc, Sema::LookupMemberName);
+ const bool HasRValue = LookupQualifiedName(RValueResult, RD);
+
+ if (HasRVoid && HasRValue) {
+ // FIXME: Improve this diagnostic.
+ Diag(FD->getLocation(), diag::err_coroutine_promise_return_ill_formed)
+ << RD;
+ return FD->setInvalidDecl();
+ } else if (HasRVoid) {
+ // If the unqualified-id return_void is found, flowing off the end of a
+ // coroutine is equivalent to a co_return with no operand. Otherwise,
+ // flowing off the end of a coroutine results in undefined behavior.
+ Fallthrough = BuildCoreturnStmt(FD->getLocation(), nullptr);
+ Fallthrough = ActOnFinishFullStmt(Fallthrough.get());
+ if (Fallthrough.isInvalid())
+ return FD->setInvalidDecl();
+ }
+
+ // [dcl.fct.def.coroutine]/3
+ // The unqualified-id set_exception is found in the scope of P by class
+ // member access lookup (3.4.5).
+ DeclarationName SetExDN = PP.getIdentifierInfo("set_exception");
+ LookupResult SetExResult(*this, SetExDN, Loc, Sema::LookupMemberName);
+ if (LookupQualifiedName(SetExResult, RD)) {
+ // Form the call 'p.set_exception(std::current_exception())'
+ SetException = buildStdCurrentExceptionCall(*this, Loc);
+ if (SetException.isInvalid())
+ return FD->setInvalidDecl();
+ Expr *E = SetException.get();
+ SetException = buildPromiseCall(*this, Fn, Loc, "set_exception", E);
+ SetException = ActOnFinishFullExpr(SetException.get(), Loc);
+ if (SetException.isInvalid())
+ return FD->setInvalidDecl();
+ }
+ }
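A hypothetical promise type exercising the new lookups ('my_task' is an assumed coroutine return type; suspend-point members omitted):

    #include <exception>

    struct my_task;  // assumed coroutine return type
    struct promise_type {
      my_task get_return_object();
      void return_void();  // flowing off the end now acts like 'co_return;'
      void set_exception(std::exception_ptr);  // receives current_exception()
      // Adding return_value() alongside return_void() would now be
      // diagnosed via err_coroutine_promise_return_ill_formed.
    };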
// Build implicit 'p.get_return_object()' expression and form initialization
// of return type from it.
ExprResult ReturnObject =
- buildPromiseCall(*this, Fn, Loc, "get_return_object", None);
+ buildPromiseCall(*this, Fn, Loc, "get_return_object", None);
if (ReturnObject.isInvalid())
return FD->setInvalidDecl();
QualType RetType = FD->getReturnType();
@@ -443,6 +702,6 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
// Build body for the coroutine wrapper statement.
Body = new (Context) CoroutineBodyStmt(
Body, PromiseStmt.get(), InitialSuspend.get(), FinalSuspend.get(),
- /*SetException*/nullptr, /*Fallthrough*/nullptr,
+ SetException.get(), Fallthrough.get(), Allocation, Deallocation,
ReturnObject.get(), ParamMoves);
}
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 41719d4e7b08..c32757565dd1 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -41,6 +40,7 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Triple.h"
@@ -780,8 +780,8 @@ Sema::ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
ObjCMethodDecl *CurMethod = getCurMethodDecl();
if (NextToken.is(tok::coloncolon)) {
- BuildCXXNestedNameSpecifier(S, *Name, NameLoc, NextToken.getLocation(),
- QualType(), false, SS, nullptr, false);
+ NestedNameSpecInfo IdInfo(Name, NameLoc, NextToken.getLocation());
+ BuildCXXNestedNameSpecifier(S, IdInfo, false, SS, nullptr, false);
}
LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
@@ -1522,7 +1522,7 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
// White-list anything with an __attribute__((unused)) type.
- QualType Ty = VD->getType();
+ const auto *Ty = VD->getType().getTypePtr();
// Only look at the outermost level of typedef.
if (const TypedefType *TT = Ty->getAs<TypedefType>()) {
@@ -1535,6 +1535,10 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (Ty->isIncompleteType() || Ty->isDependentType())
return false;
+ // Look at the element type to ensure that the warning behaviour is
+ // consistent for both scalars and arrays.
+ Ty = Ty->getBaseElementTypeUnsafe();
+
if (const TagType *TT = Ty->getAs<TagType>()) {
const TagDecl *Tag = TT->getDecl();
if (Tag->hasAttr<UnusedAttr>())
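Consequence of inspecting the base element type (hypothetical -Wunused-variable example):

    struct __attribute__((unused)) Guard {};

    void g() {
      Guard s;       // already exempt: the tag carries 'unused'
      Guard arr[2];  // now also exempt; the array type previously hid the tag
    }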
@@ -1791,7 +1795,9 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
return nullptr;
}
- if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(ID)) {
+ if (!ForRedeclaration &&
+ (Context.BuiltinInfo.isPredefinedLibFunction(ID) ||
+ Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
Diag(Loc, diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
if (Context.BuiltinInfo.getHeaderName(ID) &&
@@ -2246,6 +2252,13 @@ static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
const InheritableAttr *Attr,
Sema::AvailabilityMergeKind AMK) {
+ // This function copies an attribute Attr from a previous declaration to the
+ // new declaration D if the new declaration doesn't itself have that attribute
+ // yet or if that attribute allows duplicates.
+ // If you're adding a new attribute that requires logic different from
+ // "use explicit attribute on decl if present, else use attribute from
+ // previous decl", for example if the attribute needs to be consistent
+ // between redeclarations, you need to call a custom merge function here.
InheritableAttr *NewAttr = nullptr;
unsigned AttrSpellingListIndex = Attr->getSpellingListIndex();
if (const auto *AA = dyn_cast<AvailabilityAttr>(Attr))
@@ -2283,7 +2296,13 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeAlwaysInlineAttr(D, AA->getRange(),
&S.Context.Idents.get(AA->getSpelling()),
AttrSpellingListIndex);
- else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
+ else if (S.getLangOpts().CUDA && isa<FunctionDecl>(D) &&
+ (isa<CUDAHostAttr>(Attr) || isa<CUDADeviceAttr>(Attr) ||
+ isa<CUDAGlobalAttr>(Attr))) {
+ // CUDA target attributes are part of function signature for
+ // overloading purposes and must not be merged.
+ return false;
+ } else if (const auto *MA = dyn_cast<MinSizeAttr>(Attr))
NewAttr = S.mergeMinSizeAttr(D, MA->getRange(), AttrSpellingListIndex);
else if (const auto *OA = dyn_cast<OptimizeNoneAttr>(Attr))
NewAttr = S.mergeOptimizeNoneAttr(D, OA->getRange(), AttrSpellingListIndex);
@@ -2304,6 +2323,9 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
(AMK == Sema::AMK_Override ||
AMK == Sema::AMK_ProtocolImplementation))
NewAttr = nullptr;
+ else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
+ NewAttr = S.mergeUuidAttr(D, UA->getRange(), AttrSpellingListIndex,
+ UA->getGuid());
else if (Attr->duplicatesAllowed() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -2915,10 +2937,20 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
}
if (getLangOpts().CPlusPlus) {
- // (C++98 13.1p2):
+ // C++1z [over.load]p2
// Certain function declarations cannot be overloaded:
- // -- Function declarations that differ only in the return type
- // cannot be overloaded.
+ // -- Function declarations that differ only in the return type,
+ // the exception specification, or both cannot be overloaded.
+
+ // Check the exception specifications match. This may recompute the type of
+ // both Old and New if it resolved exception specifications, so grab the
+ // types again after this. Because this updates the type, we do this before
+ // any of the other checks below, which may update the "de facto" NewQType
+ // but do not necessarily update the type of New.
+ if (CheckEquivalentExceptionSpec(Old, New))
+ return true;
+ OldQType = Context.getCanonicalType(Old->getType());
+ NewQType = Context.getCanonicalType(New->getType());
// Go back to the type source info to compare the declared return types,
// per C++1y [dcl.type.auto]p13:
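Hypothetical declarations now caught by the early exception-specification check:

    void g() noexcept;
    void g();  // error: exception specification differs from the previous
               // declaration; C++1z does not treat this as an overload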
@@ -2933,10 +2965,10 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
(New->getTypeSourceInfo()
? New->getTypeSourceInfo()->getType()->castAs<FunctionType>()
: NewType)->getReturnType();
- QualType ResQT;
if (!Context.hasSameType(OldDeclaredReturnType, NewDeclaredReturnType) &&
!((NewQType->isDependentType() || OldQType->isDependentType()) &&
New->isLocalExternDecl())) {
+ QualType ResQT;
if (NewDeclaredReturnType->isObjCObjectPointerType() &&
OldDeclaredReturnType->isObjCObjectPointerType())
ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType);
@@ -3074,7 +3106,7 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// noreturn should now match unless the old type info didn't have it.
QualType OldQTypeForComparison = OldQType;
if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) {
- assert(OldQType == QualType(OldType, 0));
+ auto *OldType = OldQType->castAs<FunctionProtoType>();
const FunctionType *OldTypeForComparison
= Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true));
OldQTypeForComparison = QualType(OldTypeForComparison, 0);
@@ -3367,11 +3399,11 @@ void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old,
// We are merging a variable declaration New into Old. If it has an array
// bound, and that bound differs from Old's bound, we should diagnose the
// mismatch.
- if (!NewArray->isIncompleteArrayType()) {
+ if (!NewArray->isIncompleteArrayType() && !NewArray->isDependentType()) {
for (VarDecl *PrevVD = Old->getMostRecentDecl(); PrevVD;
PrevVD = PrevVD->getPreviousDecl()) {
const ArrayType *PrevVDTy = Context.getAsArrayType(PrevVD->getType());
- if (PrevVDTy->isIncompleteArrayType())
+ if (PrevVDTy->isIncompleteArrayType() || PrevVDTy->isDependentType())
continue;
if (!Context.hasSameType(NewArray, PrevVDTy))
@@ -3657,29 +3689,16 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
}
// C++ doesn't have tentative definitions, so go right ahead and check here.
- VarDecl *Def;
if (getLangOpts().CPlusPlus &&
- New->isThisDeclarationADefinition() == VarDecl::Definition &&
- (Def = Old->getDefinition())) {
- NamedDecl *Hidden = nullptr;
- if (!hasVisibleDefinition(Def, &Hidden) &&
- (New->getFormalLinkage() == InternalLinkage ||
- New->getDescribedVarTemplate() ||
- New->getNumTemplateParameterLists() ||
- New->getDeclContext()->isDependentContext())) {
- // The previous definition is hidden, and multiple definitions are
- // permitted (in separate TUs). Form another definition of it.
- } else if (Old->isStaticDataMember() &&
- Old->getCanonicalDecl()->isInline() &&
- Old->getCanonicalDecl()->isConstexpr()) {
+ New->isThisDeclarationADefinition() == VarDecl::Definition) {
+ if (Old->isStaticDataMember() && Old->getCanonicalDecl()->isInline() &&
+ Old->getCanonicalDecl()->isConstexpr()) {
// This definition won't be a definition any more once it's been merged.
Diag(New->getLocation(),
diag::warn_deprecated_redundant_constexpr_static_def);
- } else {
- Diag(New->getLocation(), diag::err_redefinition) << New;
- Diag(Def->getLocation(), diag::note_previous_definition);
- New->setInvalidDecl();
- return;
+ } else if (VarDecl *Def = Old->getDefinition()) {
+ if (checkVarDeclRedefinition(Def, New))
+ return;
}
}
@@ -3708,6 +3727,32 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
New->setImplicitlyInline();
}
+/// We've just determined that \p Old and \p New both appear to be definitions
+/// of the same variable. Either diagnose or fix the problem.
+bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
+ if (!hasVisibleDefinition(Old) &&
+ (New->getFormalLinkage() == InternalLinkage ||
+ New->isInline() ||
+ New->getDescribedVarTemplate() ||
+ New->getNumTemplateParameterLists() ||
+ New->getDeclContext()->isDependentContext())) {
+ // The previous definition is hidden, and multiple definitions are
+ // permitted (in separate TUs). Demote this to a declaration.
+ New->demoteThisDefinitionToDeclaration();
+
+ // Make the canonical definition visible.
+ if (auto *OldTD = Old->getDescribedVarTemplate())
+ makeMergedDefinitionVisible(OldTD, New->getLocation());
+ makeMergedDefinitionVisible(Old, New->getLocation());
+ return false;
+ } else {
+ Diag(New->getLocation(), diag::err_redefinition) << New;
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return true;
+ }
+}
+
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed.
Decl *
@@ -4793,6 +4838,9 @@ Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
Dcl && Dcl->getDeclContext()->isFileContext())
Dcl->setTopLevelDeclInObjCContainer();
+ if (getLangOpts().OpenCL)
+ setCurrentOpenCLExtensionForDecl(Dcl);
+
return Dcl;
}
@@ -4921,7 +4969,9 @@ NamedDecl *Sema::HandleDeclarator(Scope *S, Declarator &D,
// All of these full declarators require an identifier. If it doesn't have
// one, the ParsedFreeStandingDeclSpec action should be used.
- if (!Name) {
+ if (D.isDecompositionDeclarator()) {
+ return ActOnDecompositionDeclarator(S, D, TemplateParamLists);
+ } else if (!Name) {
if (!D.isInvalidType()) // Reject this if we think it is valid.
Diag(D.getDeclSpec().getLocStart(),
diag::err_declarator_need_ident)
@@ -5595,6 +5645,9 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
NamedDecl *NewDecl,
bool IsSpecialization,
bool IsDefinition) {
+ if (OldDecl->isInvalidDecl())
+ return;
+
if (TemplateDecl *OldTD = dyn_cast<TemplateDecl>(OldDecl)) {
OldDecl = OldTD->getTemplatedDecl();
if (!IsSpecialization)
@@ -5715,23 +5768,7 @@ static bool isFunctionDefinitionDiscarded(Sema &S, FunctionDecl *FD) {
return false;
// Okay, go ahead and call the relatively-more-expensive function.
-
-#ifndef NDEBUG
- // AST quite reasonably asserts that it's working on a function
- // definition. We don't really have a way to tell it that we're
- // currently defining the function, so just lie to it in +Asserts
- // builds. This is an awful hack.
- FD->setLazyBody(1);
-#endif
-
- bool isC99Inline =
- S.Context.GetGVALinkageForFunction(FD) == GVA_AvailableExternally;
-
-#ifndef NDEBUG
- FD->setLazyBody(0);
-#endif
-
- return isC99Inline;
+ return S.Context.GetGVALinkageForFunction(FD) == GVA_AvailableExternally;
}
/// Determine whether a variable is extern "C" prior to attaching
@@ -5845,40 +5882,55 @@ static bool isDeclExternC(const Decl *D) {
llvm_unreachable("Unknown type of decl!");
}
-NamedDecl *
-Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
- TypeSourceInfo *TInfo, LookupResult &Previous,
- MultiTemplateParamsArg TemplateParamLists,
- bool &AddToScope) {
+NamedDecl *Sema::ActOnVariableDeclarator(
+ Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
+ LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
+ bool &AddToScope, ArrayRef<BindingDecl *> Bindings) {
QualType R = TInfo->getType();
DeclarationName Name = GetNameForDeclarator(D).getName();
- // OpenCL v2.0 s6.9.b - Image type can only be used as a function argument.
- // OpenCL v2.0 s6.13.16.1 - Pipe type can only be used as a function
- // argument.
- if (getLangOpts().OpenCL && (R->isImageType() || R->isPipeType())) {
- Diag(D.getIdentifierLoc(),
- diag::err_opencl_type_can_only_be_used_as_function_parameter)
- << R;
- D.setInvalidType();
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+
+ if (D.isDecompositionDeclarator()) {
+ AddToScope = false;
+ // Take the name of the first declarator as our name for diagnostic
+ // purposes.
+ auto &Decomp = D.getDecompositionDeclarator();
+ if (!Decomp.bindings().empty()) {
+ II = Decomp.bindings()[0].Name;
+ Name = II;
+ }
+ } else if (!II) {
+ Diag(D.getIdentifierLoc(), diag::err_bad_variable_name)
+ << Name;
return nullptr;
}
- DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
- StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
-
- // dllimport globals without explicit storage class are treated as extern. We
- // have to change the storage class this early to get the right DeclContext.
- if (SC == SC_None && !DC->isRecord() &&
- hasParsedAttr(S, D, AttributeList::AT_DLLImport) &&
- !hasParsedAttr(S, D, AttributeList::AT_DLLExport))
- SC = SC_Extern;
+ if (getLangOpts().OpenCL) {
+ // OpenCL v2.0 s6.9.b - Image type can only be used as a function argument.
+ // OpenCL v2.0 s6.13.16.1 - Pipe type can only be used as a function
+ // argument.
+ if (R->isImageType() || R->isPipeType()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_opencl_type_can_only_be_used_as_function_parameter)
+ << R;
+ D.setInvalidType();
+ return nullptr;
+ }
- DeclContext *OriginalDC = DC;
- bool IsLocalExternDecl = SC == SC_Extern &&
- adjustContextForLocalExternDecl(DC);
+ // OpenCL v1.2 s6.9.r:
+ // The event type cannot be used to declare a program scope variable.
+ // OpenCL v2.0 s6.9.q:
+ // The clk_event_t and reserve_id_t types cannot be declared in program
+ // scope.
+ if (S->getParent() == nullptr) {
+ if (R->isReserveIDT() || R->isClkEventT() || R->isEventT()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_invalid_type_for_program_scope_var) << R;
+ D.setInvalidType();
+ return nullptr;
+ }
+ }
- if (getLangOpts().OpenCL) {
// OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
QualType NR = R;
while (NR->isPointerType()) {
@@ -5890,7 +5942,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NR = NR->getPointeeType();
}
- if (!getOpenCLOptions().cl_khr_fp16) {
+ if (!getOpenCLOptions().isEnabled("cl_khr_fp16")) {
// OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and
// half array type (unless the cl_khr_fp16 extension is enabled).
if (Context.getBaseElementType(R)->isHalfType()) {
@@ -5898,8 +5950,40 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
D.setInvalidType();
}
}
+
+ // OpenCL v1.2 s6.9.b p4:
+ // The sampler type cannot be used with the __local and __global address
+ // space qualifiers.
+ if (R->isSamplerT() && (R.getAddressSpace() == LangAS::opencl_local ||
+ R.getAddressSpace() == LangAS::opencl_global)) {
+ Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
+ }
+
+ // OpenCL v1.2 s6.9.r:
+ // The event type cannot be used with the __local, __constant and __global
+ // address space qualifiers.
+ if (R->isEventT()) {
+ if (R.getAddressSpace()) {
+ Diag(D.getLocStart(), diag::err_event_t_addr_space_qual);
+ D.setInvalidType();
+ }
+ }
}
+ DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
+ StorageClass SC = StorageClassSpecToVarDeclStorageClass(D.getDeclSpec());
+
+ // dllimport globals without explicit storage class are treated as extern. We
+ // have to change the storage class this early to get the right DeclContext.
+ if (SC == SC_None && !DC->isRecord() &&
+ hasParsedAttr(S, D, AttributeList::AT_DLLImport) &&
+ !hasParsedAttr(S, D, AttributeList::AT_DLLExport))
+ SC = SC_Extern;
+
+ DeclContext *OriginalDC = DC;
+ bool IsLocalExternDecl = SC == SC_Extern &&
+ adjustContextForLocalExternDecl(DC);
+
if (SCSpec == DeclSpec::SCS_mutable) {
// mutable can only appear on non-static class members, so it's always
// an error here
@@ -5920,13 +6004,6 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
}
- IdentifierInfo *II = Name.getAsIdentifierInfo();
- if (!II) {
- Diag(D.getIdentifierLoc(), diag::err_bad_variable_name)
- << Name;
- return nullptr;
- }
-
DiagnoseFunctionSpecifiers(D.getDeclSpec());
if (!DC->isRecord() && S->getFnParent() == nullptr) {
@@ -5939,32 +6016,6 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
- if (getLangOpts().OpenCL) {
- // OpenCL v1.2 s6.9.b p4:
- // The sampler type cannot be used with the __local and __global address
- // space qualifiers.
- if (R->isSamplerT() && (R.getAddressSpace() == LangAS::opencl_local ||
- R.getAddressSpace() == LangAS::opencl_global)) {
- Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
- }
-
- // OpenCL 1.2 spec, p6.9 r:
- // The event type cannot be used to declare a program scope variable.
- // The event type cannot be used with the __local, __constant and __global
- // address space qualifiers.
- if (R->isEventT()) {
- if (S->getParent() == nullptr) {
- Diag(D.getLocStart(), diag::err_event_t_global_var);
- D.setInvalidType();
- }
-
- if (R.getAddressSpace()) {
- Diag(D.getLocStart(), diag::err_event_t_addr_space_qual);
- D.setInvalidType();
- }
- }
- }
-
bool IsExplicitSpecialization = false;
bool IsVariableTemplateSpecialization = false;
bool IsPartialSpecialization = false;
@@ -6095,6 +6146,10 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
return nullptr;
NewVD = cast<VarDecl>(Res.get());
AddToScope = false;
+ } else if (D.isDecompositionDeclarator()) {
+ NewVD = DecompositionDecl::Create(Context, DC, D.getLocStart(),
+ D.getIdentifierLoc(), R, TInfo, SC,
+ Bindings);
} else
NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
D.getIdentifierLoc(), II, R, TInfo, SC);
@@ -6200,8 +6255,13 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (NewTemplate)
NewTemplate->setLexicalDeclContext(CurContext);
- if (IsLocalExternDecl)
- NewVD->setLocalExternDecl();
+ if (IsLocalExternDecl) {
+ if (D.isDecompositionDeclarator())
+ for (auto *B : Bindings)
+ B->setLocalExternDecl();
+ else
+ NewVD->setLocalExternDecl();
+ }
bool EmitTLSUnsupportedError = false;
if (DeclSpec::TSCS TSCS = D.getDeclSpec().getThreadStorageClassSpec()) {
@@ -6273,6 +6333,8 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewVD->setModulePrivate();
if (NewTemplate)
NewTemplate->setModulePrivate();
+ for (auto *B : Bindings)
+ B->setModulePrivate();
}
}
@@ -6480,7 +6542,7 @@ Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
// Special handling of variable named 'main'.
- if (Name.isIdentifier() && Name.getAsIdentifierInfo()->isStr("main") &&
+ if (Name.getAsIdentifierInfo() && Name.getAsIdentifierInfo()->isStr("main") &&
NewVD->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
!getLangOpts().Freestanding && !NewVD->getDescribedVarTemplate()) {
@@ -6522,6 +6584,17 @@ static ShadowedDeclKind computeShadowedDeclKind(const NamedDecl *ShadowedDecl,
return OldDC->isFileContext() ? SDK_Global : SDK_Local;
}
+/// Return the location of the capture if the given lambda captures the given
+/// variable \p VD, or an invalid source location otherwise.
+static SourceLocation getCaptureLocation(const LambdaScopeInfo *LSI,
+ const VarDecl *VD) {
+ for (const LambdaScopeInfo::Capture &Capture : LSI->Captures) {
+ if (Capture.isVariableCapture() && Capture.getVariable() == VD)
+ return Capture.getLocation();
+ }
+ return SourceLocation();
+}
+
/// \brief Diagnose variable or built-in function shadowing. Implements
/// -Wshadow.
///
@@ -6580,6 +6653,29 @@ void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) {
DeclContext *OldDC = ShadowedDecl->getDeclContext();
+ unsigned WarningDiag = diag::warn_decl_shadow;
+ SourceLocation CaptureLoc;
+ if (isa<VarDecl>(ShadowedDecl) && NewDC && isa<CXXMethodDecl>(NewDC)) {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(NewDC->getParent())) {
+ if (RD->isLambda() && OldDC->Encloses(NewDC->getLexicalParent())) {
+ if (RD->getLambdaCaptureDefault() == LCD_None) {
+ // Try to avoid warnings for lambdas with an explicit capture list.
+ const auto *LSI = cast<LambdaScopeInfo>(getCurFunction());
+ // Warn only when the lambda captures the shadowed decl explicitly.
+ CaptureLoc = getCaptureLocation(LSI, cast<VarDecl>(ShadowedDecl));
+ if (CaptureLoc.isInvalid())
+ WarningDiag = diag::warn_decl_shadow_uncaptured_local;
+ } else {
+ // Remember that this was shadowed so we can avoid the warning if the
+ // shadowed decl isn't captured and the warning settings allow it.
+ cast<LambdaScopeInfo>(getCurFunction())
+ ->ShadowingDecls.push_back({D, cast<VarDecl>(ShadowedDecl)});
+ return;
+ }
+ }
+ }
+ }
+
// Only warn about certain kinds of shadowing for class members.
if (NewDC && NewDC->isRecord()) {
// In particular, don't warn about shadowing non-class members.
@@ -6601,10 +6697,33 @@ void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) {
if (getSourceManager().isInSystemMacro(R.getNameLoc()))
return;
ShadowedDeclKind Kind = computeShadowedDeclKind(ShadowedDecl, OldDC);
- Diag(R.getNameLoc(), diag::warn_decl_shadow) << Name << Kind << OldDC;
+ Diag(R.getNameLoc(), WarningDiag) << Name << Kind << OldDC;
+ if (!CaptureLoc.isInvalid())
+ Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
+ << Name << /*explicitly*/ 1;
Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
}
+/// Diagnose shadowing for variables shadowed in the lambda record \p LambdaRD
+/// when these variables are captured by the lambda.
+void Sema::DiagnoseShadowingLambdaDecls(const LambdaScopeInfo *LSI) {
+ for (const auto &Shadow : LSI->ShadowingDecls) {
+ const VarDecl *ShadowedDecl = Shadow.ShadowedDecl;
+ // Try to avoid the warning when the shadowed decl isn't captured.
+ SourceLocation CaptureLoc = getCaptureLocation(LSI, ShadowedDecl);
+ const DeclContext *OldDC = ShadowedDecl->getDeclContext();
+ Diag(Shadow.VD->getLocation(), CaptureLoc.isInvalid()
+ ? diag::warn_decl_shadow_uncaptured_local
+ : diag::warn_decl_shadow)
+ << Shadow.VD->getDeclName()
+ << computeShadowedDeclKind(ShadowedDecl, OldDC) << OldDC;
+ if (!CaptureLoc.isInvalid())
+ Diag(CaptureLoc, diag::note_var_explicitly_captured_here)
+ << Shadow.VD->getDeclName() << /*explicitly*/ 0;
+ Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
+ }
+}
+
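In effect, the two diagnostic paths split -Wshadow for lambdas roughly as follows (illustrative C++; the uncaptured case uses the new warn_decl_shadow_uncaptured_local diagnostic):

    void test() {
      int x = 0;
      auto f1 = [x]() { int x = 1; (void)x; };  // warns: 'x' is explicitly
                                                // captured and then shadowed
      auto f2 = []() { int x = 1; (void)x; };   // not captured: at most the
                                                // "uncaptured local" variant
      auto f3 = [=]() { int x = 1; (void)x; };  // capture-default: deferred to
                                                // DiagnoseShadowingLambdaDecls
      f1(); f2(); f3();
    }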
/// \brief Check -Wshadow without the advantage of a previous lookup.
void Sema::CheckShadow(Scope *S, VarDecl *D) {
if (Diags.isIgnored(diag::warn_decl_shadow, D->getLocation()))
@@ -6793,7 +6912,7 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// OpenCL v1.2 s6.8 - The static qualifier is valid only in program
// scope.
if (getLangOpts().OpenCLVersion == 120 &&
- !getOpenCLOptions().cl_clang_storage_class_specifiers &&
+ !getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers") &&
NewVD->isStaticLocal()) {
Diag(NewVD->getLocation(), diag::err_static_function_scope);
NewVD->setInvalidDecl();
@@ -6821,17 +6940,6 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
NewVD->setInvalidDecl();
return;
}
- // OpenCL v2.0 s6.12.5 - Blocks with variadic arguments are not supported.
- // TODO: this check is not enough as it doesn't diagnose the typedef
- const BlockPointerType *BlkTy = T->getAs<BlockPointerType>();
- const FunctionProtoType *FTy =
- BlkTy->getPointeeType()->getAs<FunctionProtoType>();
- if (FTy && FTy->isVariadic()) {
- Diag(NewVD->getLocation(), diag::err_opencl_block_proto_variadic)
- << T << NewVD->getSourceRange();
- NewVD->setInvalidDecl();
- return;
- }
}
// OpenCL v1.2 s6.5 - All program scope variables must be declared in the
// __constant address space.
@@ -7481,18 +7589,20 @@ enum OpenCLParamType {
ValidKernelParam,
PtrPtrKernelParam,
PtrKernelParam,
- PrivatePtrKernelParam,
+ InvalidAddrSpacePtrKernelParam,
InvalidKernelParam,
RecordKernelParam
};
-static OpenCLParamType getOpenCLKernelParameterType(QualType PT) {
+static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
if (PT->isPointerType()) {
QualType PointeeType = PT->getPointeeType();
if (PointeeType->isPointerType())
return PtrPtrKernelParam;
- return PointeeType.getAddressSpace() == 0 ? PrivatePtrKernelParam
- : PtrKernelParam;
+ if (PointeeType.getAddressSpace() == LangAS::opencl_generic ||
+ PointeeType.getAddressSpace() == 0)
+ return InvalidAddrSpacePtrKernelParam;
+ return PtrKernelParam;
}
// TODO: Forbid the other integer types (size_t, ptrdiff_t...) when they can
@@ -7507,7 +7617,10 @@ static OpenCLParamType getOpenCLKernelParameterType(QualType PT) {
if (PT->isEventT())
return InvalidKernelParam;
- if (PT->isHalfType())
+ // OpenCL extension spec v1.2 s9.5:
+ // This extension adds support for half scalar and vector types as built-in
+ // types that can be used for arithmetic operations, conversions etc.
+ if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16") && PT->isHalfType())
return InvalidKernelParam;
if (PT->isRecordType())
@@ -7528,7 +7641,7 @@ static void checkIsValidOpenCLKernelParameter(
if (ValidTypes.count(PT.getTypePtr()))
return;
- switch (getOpenCLKernelParameterType(PT)) {
+ switch (getOpenCLKernelParameterType(S, PT)) {
case PtrPtrKernelParam:
// OpenCL v1.2 s6.9.a:
// A kernel function argument cannot be declared as a
@@ -7537,11 +7650,12 @@ static void checkIsValidOpenCLKernelParameter(
D.setInvalidType();
return;
- case PrivatePtrKernelParam:
- // OpenCL v1.2 s6.9.a:
- // A kernel function argument cannot be declared as a
- // pointer to the private address space.
- S.Diag(Param->getLocation(), diag::err_opencl_private_ptr_kernel_param);
+ case InvalidAddrSpacePtrKernelParam:
+ // OpenCL v1.0 s6.5:
+ // __kernel function arguments declared to be a pointer of a type can point
+ // to one of the following address spaces only : __global, __local or
+ // __constant.
+ S.Diag(Param->getLocation(), diag::err_kernel_arg_address_space);
D.setInvalidType();
return;
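Under the renamed classification, kernel parameter checking behaves like this on OpenCL C source (a sketch; CL 2.0 spellings assumed):

    kernel void k1(global int *p);   // OK: pointer to __global
    kernel void k2(local int *p);    // OK: pointer to __local
    kernel void k3(int *p);          // error: private (default) address space
    kernel void k4(global int **p);  // error: pointer to pointer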
@@ -7555,7 +7669,10 @@ static void checkIsValidOpenCLKernelParameter(
// OpenCL v1.2 s6.8 n:
// A kernel function argument cannot be declared
// of event_t type.
- S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
+    // Do not diagnose the half type here, since it is already diagnosed as an
+    // invalid argument type for any function elsewhere.
+ if (!PT->isHalfType())
+ S.Diag(Param->getLocation(), diag::err_bad_kernel_param_type) << PT;
D.setInvalidType();
return;
@@ -7611,7 +7728,7 @@ static void checkIsValidOpenCLKernelParameter(
if (ValidTypes.count(QT.getTypePtr()))
continue;
- OpenCLParamType ParamType = getOpenCLKernelParameterType(QT);
+ OpenCLParamType ParamType = getOpenCLKernelParameterType(S, QT);
if (ParamType == ValidKernelParam)
continue;
@@ -7625,7 +7742,7 @@ static void checkIsValidOpenCLKernelParameter(
// do not allow OpenCL objects to be passed as elements of the struct or
// union.
if (ParamType == PtrKernelParam || ParamType == PtrPtrKernelParam ||
- ParamType == PrivatePtrKernelParam) {
+ ParamType == InvalidAddrSpacePtrKernelParam) {
S.Diag(Param->getLocation(),
diag::err_record_with_pointers_kernel_param)
<< PT->isUnionType()
@@ -7657,6 +7774,28 @@ static void checkIsValidOpenCLKernelParameter(
} while (!VisitStack.empty());
}
+/// Find the DeclContext in which a tag is implicitly declared if we see an
+/// elaborated type specifier in the specified context, and lookup finds
+/// nothing.
+static DeclContext *getTagInjectionContext(DeclContext *DC) {
+ while (!DC->isFileContext() && !DC->isFunctionOrMethod())
+ DC = DC->getParent();
+ return DC;
+}
+
+/// Find the Scope in which a tag is implicitly declared if we see an
+/// elaborated type specifier in the specified context, and lookup finds
+/// nothing.
+static Scope *getTagInjectionScope(Scope *S, const LangOptions &LangOpts) {
+ while (S->isClassScope() ||
+ (LangOpts.CPlusPlus &&
+ S->isFunctionPrototypeScope()) ||
+ ((S->getFlags() & Scope::DeclScope) == 0) ||
+ (S->getEntity() && S->getEntity()->isTransparentContext()))
+ S = S->getParent();
+ return S;
+}
+
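These helpers implement C-style tag injection; a small C example of the behavior they compute (illustrative):

    void f(void) {
      struct S *p;  /* lookup finds no 'struct S': it is implicitly declared in
                       the enclosing block scope these helpers locate */
      (void)p;
    }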
NamedDecl*
Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo, LookupResult &Previous,
@@ -8111,8 +8250,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Copy the parameter declarations from the declarator D to the function
// declaration NewFD, if they are available. First scavenge them into Params.
SmallVector<ParmVarDecl*, 16> Params;
- if (D.isFunctionDeclarator()) {
- DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ unsigned FTIIdx;
+ if (D.isFunctionDeclarator(FTIIdx)) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getTypeObject(FTIIdx).Fun;
// Check for C99 6.7.5.3p10 - foo(void) is a non-varargs
// function that takes no arguments, not a function that takes a
@@ -8130,6 +8270,41 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
NewFD->setInvalidDecl();
}
}
+
+ if (!getLangOpts().CPlusPlus) {
+ // In C, find all the tag declarations from the prototype and move them
+ // into the function DeclContext. Remove them from the surrounding tag
+ // injection context of the function, which is typically but not always
+ // the TU.
+ DeclContext *PrototypeTagContext =
+ getTagInjectionContext(NewFD->getLexicalDeclContext());
+ for (NamedDecl *NonParmDecl : FTI.getDeclsInPrototype()) {
+ auto *TD = dyn_cast<TagDecl>(NonParmDecl);
+
+ // We don't want to reparent enumerators. Look at their parent enum
+ // instead.
+ if (!TD) {
+ if (auto *ECD = dyn_cast<EnumConstantDecl>(NonParmDecl))
+ TD = cast<EnumDecl>(ECD->getDeclContext());
+ }
+ if (!TD)
+ continue;
+ DeclContext *TagDC = TD->getLexicalDeclContext();
+ if (!TagDC->containsDecl(TD))
+ continue;
+ TagDC->removeDecl(TD);
+ TD->setDeclContext(NewFD);
+ NewFD->addDecl(TD);
+
+ // Preserve the lexical DeclContext if it is not the surrounding tag
+ // injection context of the FD. In this example, the semantic context of
+ // E will be f and the lexical context will be S, while both the
+ // semantic and lexical contexts of S will be f:
+ // void f(struct S { enum E { a } f; } s);
+ if (TagDC != PrototypeTagContext)
+ TD->setLexicalDeclContext(TagDC);
+ }
+ }
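A small C example of what the reparenting above preserves (illustrative; clang still warns that such tags are invisible outside the function):

    void g(enum E { A, B } e) { /* 'E' and its enumerators now belong to 'g' */
      int x = A;                /* ...so they stay visible in the body */
      (void)x; (void)e;
    }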
} else if (const FunctionProtoType *FT = R->getAs<FunctionProtoType>()) {
// When we're declaring a function with a typedef, typeof, etc as in the
// following example, we'll need to synthesize (unnamed)
@@ -8155,15 +8330,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Finally, we know we have the right number of parameters, install them.
NewFD->setParams(Params);
- // Find all anonymous symbols defined during the declaration of this function
- // and add to NewFD. This lets us track decls such 'enum Y' in:
- //
- // void f(enum Y {AA} x) {}
- //
- // which would otherwise incorrectly end up in the translation unit scope.
- NewFD->setDeclsInPrototypeScope(DeclsInPrototypeScope);
- DeclsInPrototypeScope.clear();
-
if (D.getDeclSpec().isNoreturnSpecified())
NewFD->addAttr(
::new(Context) C11NoReturnAttr(D.getDeclSpec().getNoreturnSpecLoc(),
@@ -8194,9 +8360,6 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// Handle attributes.
ProcessDeclAttributes(S, NewFD, D);
- if (getLangOpts().CUDA)
- maybeAddCUDAHostDeviceAttrs(S, NewFD, Previous);
-
if (getLangOpts().OpenCL) {
// OpenCL v1.1 s6.5: Using an address space qualifier in a function return
// type declaration will generate a compilation error.
@@ -8299,6 +8462,15 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
}
+ // We do not add HD attributes to specializations here because
+ // they may have different constexpr-ness compared to their
+ // templates and, after maybeAddCUDAHostDeviceAttrs() is applied,
+ // may end up with different effective targets. Instead, a
+ // specialization inherits its target attributes from its template
+ // in the CheckFunctionTemplateSpecialization() call below.
+    if (getLangOpts().CUDA && !isFunctionTemplateSpecialization)
+ maybeAddCUDAHostDeviceAttrs(NewFD, Previous);
+
// If it's a friend (and only if it's a friend), it's possible
// that either the specialized function type or the specialized
// template is dependent, and therefore matching will fail. In
@@ -8376,7 +8548,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
? cast<NamedDecl>(FunctionTemplate)
: NewFD);
- if (isFriend && D.isRedeclaration()) {
+ if (isFriend && NewFD->getPreviousDecl()) {
AccessSpecifier Access = AS_public;
if (!NewFD->isInvalidDecl())
Access = NewFD->getPreviousDecl()->getAccess();
@@ -8618,6 +8790,32 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
return NewFD;
}
+/// \brief Checks if the new declaration, declared in a dependent context,
+/// must be put in the same redeclaration chain as the specified declaration.
+///
+/// \param D Declaration that is checked.
+/// \param PrevDecl Previous declaration found with proper lookup method for the
+/// same declaration name.
+/// \returns True if D must be added to the redeclaration chain to which
+/// PrevDecl belongs.
+///
+bool Sema::shouldLinkDependentDeclWithPrevious(Decl *D, Decl *PrevDecl) {
+  // All declarations should be put into redeclaration chains, except for a
+  // friend declaration in a dependent context that names a function in
+  // namespace scope.
+  //
+  // This allows code like the following to compile:
+ //
+ // void func();
+ // template<typename T> class C1 { friend void func() { } };
+ // template<typename T> class C2 { friend void func() { } };
+ //
+  // This code snippet is valid unless both templates are instantiated.
+ return !(D->getLexicalDeclContext()->isDependentContext() &&
+ D->getDeclContext()->isFileContext() &&
+ D->getFriendObjectKind() != Decl::FOK_None);
+}
+
/// \brief Perform semantic checking of a new function declaration.
///
/// Performs semantic analysis of the new function declaration
@@ -8801,11 +8999,12 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
} else {
- // This needs to happen first so that 'inline' propagates.
- NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl));
-
- if (isa<CXXMethodDecl>(NewFD))
- NewFD->setAccess(OldDecl->getAccess());
+ if (shouldLinkDependentDeclWithPrevious(NewFD, OldDecl)) {
+ // This needs to happen first so that 'inline' propagates.
+ NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl));
+ if (isa<CXXMethodDecl>(NewFD))
+ NewFD->setAccess(OldDecl->getAccess());
+ }
}
}
@@ -8880,11 +9079,16 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
ASTContext::GetBuiltinTypeError Error;
LookupPredefedObjCSuperType(*this, S, NewFD->getIdentifier());
QualType T = Context.GetBuiltinType(BuiltinID, Error);
- if (!T.isNull() && !Context.hasSameType(T, NewFD->getType())) {
+ // If the type of the builtin differs only in its exception
+ // specification, that's OK.
+ // FIXME: If the types do differ in this way, it would be better to
+ // retain the 'noexcept' form of the type.
+ if (!T.isNull() &&
+ !Context.hasSameFunctionTypeIgnoringExceptionSpec(T,
+ NewFD->getType()))
// The type of this function differs from the type of the builtin,
// so forget about the builtin entirely.
Context.BuiltinInfo.forgetBuiltin(BuiltinID, Context.Idents);
- }
}
// If this function is declared as being extern "C", then check to see if
@@ -8900,6 +9104,45 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
!R->isObjCObjectPointerType())
Diag(NewFD->getLocation(), diag::warn_return_value_udt) << NewFD << R;
}
+
+ // C++1z [dcl.fct]p6:
+ // [...] whether the function has a non-throwing exception-specification
+ // [is] part of the function type
+ //
+ // This results in an ABI break between C++14 and C++17 for functions whose
+ // declared type includes an exception-specification in a parameter or
+ // return type. (Exception specifications on the function itself are OK in
+ // most cases, and exception specifications are not permitted in most other
+ // contexts where they could make it into a mangling.)
+ if (!getLangOpts().CPlusPlus1z && !NewFD->getPrimaryTemplate()) {
+ auto HasNoexcept = [&](QualType T) -> bool {
+ // Strip off declarator chunks that could be between us and a function
+      // type. We don't need to look far; exception specifications are very
+ // restricted prior to C++17.
+ if (auto *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else if (T->isAnyPointerType())
+ T = T->getPointeeType();
+ else if (auto *MPT = T->getAs<MemberPointerType>())
+ T = MPT->getPointeeType();
+ if (auto *FPT = T->getAs<FunctionProtoType>())
+ if (FPT->isNothrow(Context))
+ return true;
+ return false;
+ };
+
+ auto *FPT = NewFD->getType()->castAs<FunctionProtoType>();
+ bool AnyNoexcept = HasNoexcept(FPT->getReturnType());
+ for (QualType T : FPT->param_types())
+ AnyNoexcept |= HasNoexcept(T);
+ if (AnyNoexcept)
+ Diag(NewFD->getLocation(),
+ diag::warn_cxx1z_compat_exception_spec_in_signature)
+ << NewFD;
+ }
+
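Examples of the C++14/C++17 mangling hazard this warns about (a sketch):

    void f(void (*fp)() noexcept);  // noexcept in a parameter's type: warns
    void (*g())() noexcept;         // noexcept in the return type: warns
    void h() noexcept;              // on the function itself: not diagnosed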
+ if (!Redeclaration && LangOpts.CUDA)
+ checkCUDATargetOverload(NewFD, Previous);
}
return Redeclaration;
}
@@ -9421,6 +9664,20 @@ namespace {
}
} // end anonymous namespace
+namespace {
+ // Simple wrapper to add the name of a variable or (if no variable is
+ // available) a DeclarationName into a diagnostic.
+ struct VarDeclOrName {
+ VarDecl *VDecl;
+ DeclarationName Name;
+
+ friend const Sema::SemaDiagnosticBuilder &
+ operator<<(const Sema::SemaDiagnosticBuilder &Diag, VarDeclOrName VN) {
+ return VN.VDecl ? Diag << VN.VDecl : Diag << VN.Name;
+ }
+ };
+} // end anonymous namespace
+
QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
DeclarationName Name, QualType Type,
TypeSourceInfo *TSI,
@@ -9430,6 +9687,8 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
assert((!VDecl || !VDecl->isInitCapture()) &&
"init captures are expected to be deduced prior to initialization");
+ VarDeclOrName VN{VDecl, Name};
+
ArrayRef<Expr *> DeduceInits = Init;
if (DirectInit) {
if (auto *PL = dyn_cast<ParenListExpr>(Init))
@@ -9445,7 +9704,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
Diag(Init->getLocStart(), IsInitCapture
? diag::err_init_capture_no_expression
: diag::err_auto_var_init_no_expression)
- << Name << Type << Range;
+ << VN << Type << Range;
return QualType();
}
@@ -9453,7 +9712,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
Diag(DeduceInits[1]->getLocStart(),
IsInitCapture ? diag::err_init_capture_multiple_expressions
: diag::err_auto_var_init_multiple_expressions)
- << Name << Type << Range;
+ << VN << Type << Range;
return QualType();
}
@@ -9462,7 +9721,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
Diag(Init->getLocStart(), IsInitCapture
? diag::err_init_capture_paren_braces
: diag::err_auto_var_init_paren_braces)
- << isa<InitListExpr>(Init) << Name << Type << Range;
+ << isa<InitListExpr>(Init) << VN << Type << Range;
return QualType();
}
@@ -9478,6 +9737,15 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
DefaultedAnyToId = true;
}
+ // C++ [dcl.decomp]p1:
+ // If the assignment-expression [...] has array type A and no ref-qualifier
+ // is present, e has type cv A
+ if (VDecl && isa<DecompositionDecl>(VDecl) &&
+ Context.hasSameUnqualifiedType(Type, Context.getAutoDeductType()) &&
+ DeduceInit->getType()->isConstantArrayType())
+ return Context.getQualifiedType(DeduceInit->getType(),
+ Type.getQualifiers());
+
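A minimal example of the array special case above (illustrative):

    int arr[2] = {1, 2};
    auto [x, y] = arr;  // the invented variable has type int[2]; plain auto
                        // deduction would instead decay the initializer to int*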
QualType DeducedType;
if (DeduceAutoType(TSI, DeduceInit, DeducedType) == DAR_Failed) {
if (!IsInitCapture)
@@ -9485,13 +9753,13 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
else if (isa<InitListExpr>(Init))
Diag(Range.getBegin(),
diag::err_init_capture_deduction_failure_from_init_list)
- << Name
+ << VN
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
else
Diag(Range.getBegin(), diag::err_init_capture_deduction_failure)
- << Name << TSI->getType()
+ << VN << TSI->getType()
<< (DeduceInit->getType().isNull() ? TSI->getType()
: DeduceInit->getType())
<< DeduceInit->getSourceRange();
@@ -9505,7 +9773,7 @@ QualType Sema::deduceVarTypeFromInitializer(VarDecl *VDecl,
if (ActiveTemplateInstantiations.empty() && !DefaultedAnyToId &&
!IsInitCapture && !DeducedType.isNull() && DeducedType->isObjCIdType()) {
SourceLocation Loc = TSI->getTypeLoc().getBeginLoc();
- Diag(Loc, diag::warn_auto_var_is_id) << Name << Range;
+ Diag(Loc, diag::warn_auto_var_is_id) << VN << Range;
}
return DeducedType;
@@ -9614,25 +9882,15 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
VDecl->setInvalidDecl();
}
+ // If adding the initializer will turn this declaration into a definition,
+ // and we already have a definition for this variable, diagnose or otherwise
+ // handle the situation.
VarDecl *Def;
if ((Def = VDecl->getDefinition()) && Def != VDecl &&
- (!VDecl->isStaticDataMember() || VDecl->isOutOfLine())) {
- NamedDecl *Hidden = nullptr;
- if (!hasVisibleDefinition(Def, &Hidden) &&
- (VDecl->getFormalLinkage() == InternalLinkage ||
- VDecl->getDescribedVarTemplate() ||
- VDecl->getNumTemplateParameterLists() ||
- VDecl->getDeclContext()->isDependentContext())) {
- // The previous definition is hidden, and multiple definitions are
- // permitted (in separate TUs). Form another definition of it.
- } else {
- Diag(VDecl->getLocation(), diag::err_redefinition)
- << VDecl->getDeclName();
- Diag(Def->getLocation(), diag::note_previous_definition);
- VDecl->setInvalidDecl();
- return;
- }
- }
+ (!VDecl->isStaticDataMember() || VDecl->isOutOfLine()) &&
+ !VDecl->isThisDeclarationADemotedDefinition() &&
+ checkVarDeclRedefinition(Def, VDecl))
+ return;
if (getLangOpts().CPlusPlus) {
// C++ [class.static.data]p4
@@ -9692,6 +9950,18 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
// Perform the initialization.
ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
if (!VDecl->isInvalidDecl()) {
+    // Handle errors like 'int a({0});'.
+ if (CXXDirectInit && CXXDirectInit->getNumExprs() == 1 &&
+ !canInitializeWithParenthesizedList(VDecl->getType()))
+ if (auto IList = dyn_cast<InitListExpr>(CXXDirectInit->getExpr(0))) {
+ Diag(VDecl->getLocation(), diag::err_list_init_in_parens)
+ << VDecl->getType() << CXXDirectInit->getSourceRange()
+ << FixItHint::CreateRemoval(CXXDirectInit->getLocStart())
+ << FixItHint::CreateRemoval(CXXDirectInit->getLocEnd());
+ Init = IList;
+ CXXDirectInit = nullptr;
+ }
+
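A sketch of the initializations this fixup distinguishes (assumes <initializer_list>; names are illustrative):

    #include <initializer_list>

    struct S { S(std::initializer_list<int>); };
    S s({0});    // OK: record type; the parenthesized list binds to a ctor
    int a({0});  // error here, with a fixit removing the parens: int a{0};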
InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
InitializationKind Kind =
DirectInit
@@ -9909,10 +10179,17 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
VDecl->setInvalidDecl();
}
} else if (VDecl->isFileVarDecl()) {
+ // In C, extern is typically used to avoid tentative definitions when
+    // declaring variables in headers, but adding an initializer makes it a
+    // definition. This is somewhat confusing, so GCC and Clang both warn on it.
+    // In C++, extern is often used to give implicitly static const variables
+ // external linkage, so don't warn in that case. If selectany is present,
+ // this might be header code intended for C and C++ inclusion, so apply the
+ // C++ rules.
if (VDecl->getStorageClass() == SC_Extern &&
- (!getLangOpts().CPlusPlus ||
- !(Context.getBaseElementType(VDecl->getType()).isConstQualified() ||
- VDecl->isExternC())) &&
+ ((!getLangOpts().CPlusPlus && !VDecl->hasAttr<SelectAnyAttr>()) ||
+ !Context.getBaseElementType(VDecl->getType()).isConstQualified()) &&
+ !(getLangOpts().CPlusPlus && VDecl->isExternC()) &&
!isTemplateInstantiation(VDecl->getTemplateSpecializationKind()))
Diag(VDecl->getLocation(), diag::warn_extern_init);
@@ -9957,6 +10234,11 @@ void Sema::ActOnInitializerError(Decl *D) {
VarDecl *VD = dyn_cast<VarDecl>(D);
if (!VD) return;
+ // Bindings are not usable if we can't make sense of the initializer.
+ if (auto *DD = dyn_cast<DecompositionDecl>(D))
+ for (auto *BD : DD->bindings())
+ BD->setInvalidDecl();
+
// Auto types are meaningless if we can't make sense of the initializer.
if (ParsingInitForAutoVars.count(D)) {
D->setInvalidDecl();
@@ -9986,6 +10268,18 @@ void Sema::ActOnInitializerError(Decl *D) {
// though.
}
+/// Checks if an object of the given type can be initialized with a
+/// parenthesized init-list.
+///
+/// \param TargetType Type of the object being initialized.
+///
+/// This function is used to detect invalid initializations, such as
+/// 'int({0})'.
+///
+bool Sema::canInitializeWithParenthesizedList(QualType TargetType) {
+ return TargetType->isDependentType() || TargetType->isRecordType() ||
+ TargetType->getContainedAutoType();
+}
+
void Sema::ActOnUninitializedDecl(Decl *RealDecl,
bool TypeMayContainAuto) {
// If there is no declaration, there was an error parsing it. Just ignore it.
@@ -9995,6 +10289,13 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) {
QualType Type = Var->getType();
+ // C++1z [dcl.dcl]p1 grammar implies that an initializer is mandatory.
+ if (isa<DecompositionDecl>(RealDecl)) {
+ Diag(Var->getLocation(), diag::err_decomp_decl_requires_init) << Var;
+ Var->setInvalidDecl();
+ return;
+ }
+
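Examples for the new requirement (C++1z, illustrative):

    auto [a, b];        // error: decomposition declaration needs an initializer
    int arr[2] = {1, 2};
    auto [c, d] = arr;  // OK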
// C++11 [dcl.spec.auto]p3
if (TypeMayContainAuto && Type->getContainedAutoType()) {
Diag(Var->getLocation(), diag::err_auto_var_requires_init)
@@ -10009,7 +10310,8 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl,
// C++11 [dcl.constexpr]p1: The constexpr specifier shall be applied only to
// the definition of a variable [...] or the declaration of a static data
// member.
- if (Var->isConstexpr() && !Var->isThisDeclarationADefinition()) {
+ if (Var->isConstexpr() && !Var->isThisDeclarationADefinition() &&
+ !Var->isThisDeclarationADemotedDefinition()) {
if (Var->isStaticDataMember()) {
// C++1z removes the relevant rule; the in-class declaration is always
// a definition there.
@@ -10344,8 +10646,17 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
Diag(var->getLocation(), diag::warn_missing_variable_declarations) << var;
}
+ // Cache the result of checking for constant initialization.
+ Optional<bool> CacheHasConstInit;
+ const Expr *CacheCulprit;
+ auto checkConstInit = [&]() mutable {
+ if (!CacheHasConstInit)
+ CacheHasConstInit = var->getInit()->isConstantInitializer(
+ Context, var->getType()->isReferenceType(), &CacheCulprit);
+ return *CacheHasConstInit;
+ };
+
if (var->getTLSKind() == VarDecl::TLS_Static) {
- const Expr *Culprit;
if (var->getType().isDestructedType()) {
// GNU C++98 edits for __thread, [basic.start.term]p3:
// The type of an object with thread storage duration shall not
@@ -10353,17 +10664,17 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
Diag(var->getLocation(), diag::err_thread_nontrivial_dtor);
if (getLangOpts().CPlusPlus11)
Diag(var->getLocation(), diag::note_use_thread_local);
- } else if (getLangOpts().CPlusPlus && var->hasInit() &&
- !var->getInit()->isConstantInitializer(
- Context, var->getType()->isReferenceType(), &Culprit)) {
- // GNU C++98 edits for __thread, [basic.start.init]p4:
- // An object of thread storage duration shall not require dynamic
- // initialization.
- // FIXME: Need strict checking here.
- Diag(Culprit->getExprLoc(), diag::err_thread_dynamic_init)
- << Culprit->getSourceRange();
- if (getLangOpts().CPlusPlus11)
- Diag(var->getLocation(), diag::note_use_thread_local);
+ } else if (getLangOpts().CPlusPlus && var->hasInit()) {
+ if (!checkConstInit()) {
+ // GNU C++98 edits for __thread, [basic.start.init]p4:
+ // An object of thread storage duration shall not require dynamic
+ // initialization.
+ // FIXME: Need strict checking here.
+ Diag(CacheCulprit->getExprLoc(), diag::err_thread_dynamic_init)
+ << CacheCulprit->getSourceRange();
+ if (getLangOpts().CPlusPlus11)
+ Diag(var->getLocation(), diag::note_use_thread_local);
+ }
}
}
@@ -10400,7 +10711,16 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
}
// All the following checks are C++ only.
- if (!getLangOpts().CPlusPlus) return;
+ if (!getLangOpts().CPlusPlus) {
+ // If this variable must be emitted, add it as an initializer for the
+ // current module.
+ if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, var);
+ return;
+ }
+
+ if (auto *DD = dyn_cast<DecompositionDecl>(var))
+ CheckCompleteDecompositionDeclaration(DD);
QualType type = var->getType();
if (type->isDependentType()) return;
@@ -10434,18 +10754,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (!var->getDeclContext()->isDependentContext() &&
Init && !Init->isValueDependent()) {
- if (IsGlobal && !var->isConstexpr() &&
- !getDiagnostics().isIgnored(diag::warn_global_constructor,
- var->getLocation())) {
- // Warn about globals which don't have a constant initializer. Don't
- // warn about globals with a non-trivial destructor because we already
- // warned about them.
- CXXRecordDecl *RD = baseType->getAsCXXRecordDecl();
- if (!(RD && !RD->hasTrivialDestructor()) &&
- !Init->isConstantInitializer(Context, baseType->isReferenceType()))
- Diag(var->getLocation(), diag::warn_global_constructor)
- << Init->getSourceRange();
- }
if (var->isConstexpr()) {
SmallVector<PartialDiagnosticAt, 8> Notes;
@@ -10469,11 +10777,45 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// initialized by a constant expression if we check later.
var->checkInitIsICE();
}
+
+ // Don't emit further diagnostics about constexpr globals since they
+ // were just diagnosed.
+ if (!var->isConstexpr() && GlobalStorage &&
+ var->hasAttr<RequireConstantInitAttr>()) {
+ // FIXME: Need strict checking in C++03 here.
+ bool DiagErr = getLangOpts().CPlusPlus11
+ ? !var->checkInitIsICE() : !checkConstInit();
+ if (DiagErr) {
+        auto *attr = var->getAttr<RequireConstantInitAttr>();
+        Diag(var->getLocation(), diag::err_require_constant_init_failed)
+            << Init->getSourceRange();
+        Diag(attr->getLocation(),
+             diag::note_declared_required_constant_init_here)
+            << attr->getRange();
+ }
+    } else if (!var->isConstexpr() && IsGlobal &&
+               !getDiagnostics().isIgnored(diag::warn_global_constructor,
+                                           var->getLocation())) {
+ // Warn about globals which don't have a constant initializer. Don't
+ // warn about globals with a non-trivial destructor because we already
+ // warned about them.
+ CXXRecordDecl *RD = baseType->getAsCXXRecordDecl();
+ if (!(RD && !RD->hasTrivialDestructor())) {
+ if (!checkConstInit())
+ Diag(var->getLocation(), diag::warn_global_constructor)
+ << Init->getSourceRange();
+ }
+ }
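A sketch of the new attribute check, assuming the GNU spelling introduced with RequireConstantInitAttr:

    struct Dynamic { Dynamic(); };

    __attribute__((require_constant_initialization))
    Dynamic d;   // hard error: the initializer is not constant
    Dynamic g;   // no attribute: at most -Wglobal-constructors warns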
}
// Require the destructor.
if (const RecordType *recordType = baseType->getAs<RecordType>())
FinalizeVarWithDestructor(var, recordType);
+
+ // If this variable must be emitted, add it as an initializer for the current
+ // module.
+ if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, var);
}
/// \brief Determines if a variable's alignment is dependent.
@@ -10497,6 +10839,12 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (!VD)
return;
+ if (auto *DD = dyn_cast<DecompositionDecl>(ThisDecl)) {
+ for (auto *BD : DD->bindings()) {
+ FinalizeDeclaration(BD);
+ }
+ }
+
checkAttributesAfterMerging(*this, *VD);
// Perform TLS alignment check here after attributes attached to the variable
@@ -10527,12 +10875,11 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) {
// CUDA E.2.9.4: Within the body of a __device__ or __global__
// function, only __shared__ variables may be declared with
// static storage class.
- if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
- (FD->hasAttr<CUDADeviceAttr>() || FD->hasAttr<CUDAGlobalAttr>()) &&
- !VD->hasAttr<CUDASharedAttr>()) {
- Diag(VD->getLocation(), diag::err_device_static_local_var);
+ if (getLangOpts().CUDA && !VD->hasAttr<CUDASharedAttr>() &&
+ CUDADiagIfDeviceCode(VD->getLocation(),
+ diag::err_device_static_local_var)
+ << CurrentCUDATarget())
VD->setInvalidDecl();
- }
}
}
@@ -10541,36 +10888,55 @@ Sema::FinalizeDeclaration(Decl *ThisDecl) {
// 7.5). We must also apply the same checks to all __shared__
// variables whether they are local or not. CUDA also allows
// constant initializers for __constant__ and __device__ variables.
- if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
+ if (getLangOpts().CUDA) {
const Expr *Init = VD->getInit();
- if (Init && VD->hasGlobalStorage() &&
- (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>() ||
- VD->hasAttr<CUDASharedAttr>())) {
- assert((!VD->isStaticLocal() || VD->hasAttr<CUDASharedAttr>()));
- bool AllowedInit = false;
- if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init))
- AllowedInit =
- isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
- // We'll allow constant initializers even if it's a non-empty
- // constructor according to CUDA rules. This deviates from NVCC,
- // but allows us to handle things like constexpr constructors.
- if (!AllowedInit &&
- (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
- AllowedInit = VD->getInit()->isConstantInitializer(
- Context, VD->getType()->isReferenceType());
-
- // Also make sure that destructor, if there is one, is empty.
- if (AllowedInit)
- if (CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl())
+ if (Init && VD->hasGlobalStorage()) {
+ if (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>() ||
+ VD->hasAttr<CUDASharedAttr>()) {
+ assert(!VD->isStaticLocal() || VD->hasAttr<CUDASharedAttr>());
+ bool AllowedInit = false;
+ if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init))
AllowedInit =
- isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
-
- if (!AllowedInit) {
- Diag(VD->getLocation(), VD->hasAttr<CUDASharedAttr>()
- ? diag::err_shared_var_init
- : diag::err_dynamic_var_init)
- << Init->getSourceRange();
- VD->setInvalidDecl();
+ isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
+ // We'll allow constant initializers even if it's a non-empty
+ // constructor according to CUDA rules. This deviates from NVCC,
+ // but allows us to handle things like constexpr constructors.
+ if (!AllowedInit &&
+ (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>()))
+ AllowedInit = VD->getInit()->isConstantInitializer(
+ Context, VD->getType()->isReferenceType());
+
+ // Also make sure that destructor, if there is one, is empty.
+ if (AllowedInit)
+ if (CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl())
+ AllowedInit =
+ isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
+
+ if (!AllowedInit) {
+ Diag(VD->getLocation(), VD->hasAttr<CUDASharedAttr>()
+ ? diag::err_shared_var_init
+ : diag::err_dynamic_var_init)
+ << Init->getSourceRange();
+ VD->setInvalidDecl();
+ }
+ } else {
+ // This is a host-side global variable. Check that the initializer is
+ // callable from the host side.
+ const FunctionDecl *InitFn = nullptr;
+ if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init)) {
+ InitFn = CE->getConstructor();
+ } else if (const CallExpr *CE = dyn_cast<CallExpr>(Init)) {
+ InitFn = CE->getDirectCallee();
+ }
+ if (InitFn) {
+ CUDAFunctionTarget InitFnTarget = IdentifyCUDATarget(InitFn);
+ if (InitFnTarget != CFT_Host && InitFnTarget != CFT_HostDevice) {
+ Diag(VD->getLocation(), diag::err_ref_bad_target_global_initializer)
+ << InitFnTarget << InitFn;
+ Diag(InitFn->getLocation(), diag::note_previous_decl) << InitFn;
+ VD->setInvalidDecl();
+ }
+ }
}
}
}
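A CUDA sketch of the new host-side branch (illustrative; compiled as CUDA so the target attributes are available):

    __device__ int devFn();
    __host__ int hostFn();

    int bad = devFn();  // error: host global initialized by a __device__ fn
    int ok = hostFn();  // OK: the initializer is callable from the host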
@@ -10675,13 +11041,36 @@ Sema::DeclGroupPtrTy Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
Decls.push_back(DS.getRepAsDecl());
DeclaratorDecl *FirstDeclaratorInGroup = nullptr;
- for (unsigned i = 0, e = Group.size(); i != e; ++i)
+ DecompositionDecl *FirstDecompDeclaratorInGroup = nullptr;
+ bool DiagnosedMultipleDecomps = false;
+
+ for (unsigned i = 0, e = Group.size(); i != e; ++i) {
if (Decl *D = Group[i]) {
- if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D))
- if (!FirstDeclaratorInGroup)
- FirstDeclaratorInGroup = DD;
+ auto *DD = dyn_cast<DeclaratorDecl>(D);
+ if (DD && !FirstDeclaratorInGroup)
+ FirstDeclaratorInGroup = DD;
+
+ auto *Decomp = dyn_cast<DecompositionDecl>(D);
+ if (Decomp && !FirstDecompDeclaratorInGroup)
+ FirstDecompDeclaratorInGroup = Decomp;
+
+ // A decomposition declaration cannot be combined with any other
+ // declaration in the same group.
+ auto *OtherDD = FirstDeclaratorInGroup;
+ if (OtherDD == FirstDecompDeclaratorInGroup)
+ OtherDD = DD;
+ if (OtherDD && FirstDecompDeclaratorInGroup &&
+ OtherDD != FirstDecompDeclaratorInGroup &&
+ !DiagnosedMultipleDecomps) {
+ Diag(FirstDecompDeclaratorInGroup->getLocation(),
+ diag::err_decomp_decl_not_alone)
+ << OtherDD->getSourceRange();
+ DiagnosedMultipleDecomps = true;
+ }
+
Decls.push_back(D);
}
+ }
if (DeclSpec::isDeclRep(DS.getTypeSpecType())) {
if (TagDecl *Tag = dyn_cast_or_null<TagDecl>(DS.getRepAsDecl())) {
@@ -11168,9 +11557,8 @@ Sema::CheckForFunctionRedefinition(FunctionDecl *FD,
SkipBody->ShouldSkip = true;
if (auto *TD = Definition->getDescribedFunctionTemplate())
makeMergedDefinitionVisible(TD, FD->getLocation());
- else
- makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition),
- FD->getLocation());
+ makeMergedDefinitionVisible(const_cast<FunctionDecl*>(Definition),
+ FD->getLocation());
return;
}
@@ -11256,6 +11644,11 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
return D;
}
+  // Mark this function as "will have a body eventually". This lets users call
+  // e.g. isInlineDefinitionExternallyVisible while we're still parsing this
+  // function.
+ FD->setWillHaveBody();
+
// If we are instantiating a generic lambda call operator, push
// a LambdaScopeInfo onto the function stack. But use the information
// that's already been calculated (ActOnLambdaExpr) to prime the current
@@ -11300,6 +11693,29 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
CheckParmsForFunctionDef(FD->parameters(),
/*CheckParameterNames=*/true);
+ // Add non-parameter declarations already in the function to the current
+ // scope.
+ if (FnBodyScope) {
+ for (Decl *NPD : FD->decls()) {
+ auto *NonParmDecl = dyn_cast<NamedDecl>(NPD);
+ if (!NonParmDecl)
+ continue;
+ assert(!isa<ParmVarDecl>(NonParmDecl) &&
+ "parameters should not be in newly created FD yet");
+
+ // If the decl has a name, make it accessible in the current scope.
+ if (NonParmDecl->getDeclName())
+ PushOnScopeChains(NonParmDecl, FnBodyScope, /*AddToContext=*/false);
+
+ // Similarly, dive into enums and fish their constants out, making them
+ // accessible in this scope.
+ if (auto *ED = dyn_cast<EnumDecl>(NonParmDecl)) {
+ for (auto *EI : ED->enumerators())
+ PushOnScopeChains(EI, FnBodyScope, /*AddToContext=*/false);
+ }
+ }
+ }
+
// Introduce our parameters into the function scope
for (auto Param : FD->parameters()) {
Param->setOwningFunction(FD);
@@ -11312,39 +11728,6 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
}
}
- // If we had any tags defined in the function prototype,
- // introduce them into the function scope.
- if (FnBodyScope) {
- for (ArrayRef<NamedDecl *>::iterator
- I = FD->getDeclsInPrototypeScope().begin(),
- E = FD->getDeclsInPrototypeScope().end();
- I != E; ++I) {
- NamedDecl *D = *I;
-
- // Some of these decls (like enums) may have been pinned to the
- // translation unit for lack of a real context earlier. If so, remove
- // from the translation unit and reattach to the current context.
- if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
- // Is the decl actually in the context?
- if (Context.getTranslationUnitDecl()->containsDecl(D))
- Context.getTranslationUnitDecl()->removeDecl(D);
- // Either way, reassign the lexical decl context to our FunctionDecl.
- D->setLexicalDeclContext(CurContext);
- }
-
- // If the decl has a non-null name, make accessible in the current scope.
- if (!D->getName().empty())
- PushOnScopeChains(D, FnBodyScope, /*AddToContext=*/false);
-
- // Similarly, dive into enums and fish their constants out, making them
- // accessible in this scope.
- if (auto *ED = dyn_cast<EnumDecl>(D)) {
- for (auto *EI : ED->enumerators())
- PushOnScopeChains(EI, FnBodyScope, /*AddToContext=*/false);
- }
- }
- }
-
// Ensure that the function's exception specification is instantiated.
if (const FunctionProtoType *FPT = FD->getType()->getAs<FunctionProtoType>())
ResolveExceptionSpec(D->getLocation(), FPT);
@@ -11446,7 +11829,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
sema::AnalysisBasedWarnings::Policy *ActivePolicy = nullptr;
- if (getLangOpts().Coroutines && !getCurFunction()->CoroutineStmts.empty())
+ if (getLangOpts().CoroutinesTS && !getCurFunction()->CoroutineStmts.empty())
CheckCompletedCoroutineBody(FD, Body);
if (FD) {
@@ -11555,6 +11938,21 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
<< FixItHint::CreateInsertion(FTL.getRParenLoc(), "void");
}
}
+
+ // GNU warning -Wstrict-prototypes
+    //   Warn if a K&R function is defined without a previous declaration.
+ // This warning is issued only if the definition itself does not provide
+ // a prototype. Only K&R definitions do not provide a prototype.
+ // An empty list in a function declarator that is part of a definition
+ // of that function specifies that the function has no parameters
+ // (C99 6.7.5.3p14)
+ if (!FD->hasWrittenPrototype() && FD->getNumParams() > 0 &&
+ !LangOpts.CPlusPlus) {
+ TypeSourceInfo *TI = FD->getTypeSourceInfo();
+ TypeLoc TL = TI->getTypeLoc();
+ FunctionTypeLoc FTL = TL.castAs<FunctionTypeLoc>();
+ Diag(FTL.getLParenLoc(), diag::warn_strict_prototypes) << 1;
+ }
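The warning fires on C definitions like this (illustrative):

    int f(a) int a; { return a; }  /* K&R definition: no prototype, warns at '(' */
    int g(void) { return 0; }      /* prototyped definition: not diagnosed */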
}
if (auto *MD = dyn_cast<CXXMethodDecl>(FD)) {
@@ -11637,6 +12035,9 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
return nullptr;
}
+ if (Body && getCurFunction()->HasPotentialAvailabilityViolations)
+ DiagnoseUnguardedAvailabilityViolations(dcl);
+
assert(!getCurFunction()->ObjCShouldCallSuper &&
"This should only be set for ObjC methods, which should have been "
"handled in the block above.");
@@ -11683,6 +12084,21 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (FD && FD->hasAttr<NakedAttr>()) {
for (const Stmt *S : Body->children()) {
+      // Allow local register variables without an initializer, as they don't
+      // require a prologue.
+ bool RegisterVariables = false;
+ if (auto *DS = dyn_cast<DeclStmt>(S)) {
+ for (const auto *Decl : DS->decls()) {
+ if (const auto *Var = dyn_cast<VarDecl>(Decl)) {
+ RegisterVariables =
+ Var->hasAttr<AsmLabelAttr>() && !Var->hasInit();
+ if (!RegisterVariables)
+ break;
+ }
+ }
+ }
+ if (RegisterVariables)
+ continue;
if (!isa<AsmStmt>(S) && !isa<NullStmt>(S)) {
Diag(S->getLocStart(), diag::err_non_asm_stmt_in_naked_function);
Diag(FD->getAttr<NakedAttr>()->getLocation(), diag::note_attribute);
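With the change above, a naked function may now contain uninitialized register variables (a sketch; the register name and asm text are target-specific):

    __attribute__((naked)) void f() {
      register int r0 __asm__("r0");  // allowed now: needs no prologue
      __asm__("bx lr");               // asm statements were already allowed
    }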
@@ -11796,6 +12212,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
+ /*DeclsInPrototype=*/None,
Loc, Loc, D),
DS.getAttributes(),
SourceLocation());
@@ -12069,6 +12486,31 @@ static bool isClassCompatTagKind(TagTypeKind Tag)
return Tag == TTK_Struct || Tag == TTK_Class || Tag == TTK_Interface;
}
+Sema::NonTagKind Sema::getNonTagTypeDeclKind(const Decl *PrevDecl,
+ TagTypeKind TTK) {
+ if (isa<TypedefDecl>(PrevDecl))
+ return NTK_Typedef;
+ else if (isa<TypeAliasDecl>(PrevDecl))
+ return NTK_TypeAlias;
+ else if (isa<ClassTemplateDecl>(PrevDecl))
+ return NTK_Template;
+ else if (isa<TypeAliasTemplateDecl>(PrevDecl))
+ return NTK_TypeAliasTemplate;
+ else if (isa<TemplateTemplateParmDecl>(PrevDecl))
+ return NTK_TemplateTemplateArgument;
+ switch (TTK) {
+ case TTK_Struct:
+ case TTK_Interface:
+ case TTK_Class:
+ return getLangOpts().CPlusPlus ? NTK_NonClass : NTK_NonStruct;
+ case TTK_Union:
+ return NTK_NonUnion;
+ case TTK_Enum:
+ return NTK_NonEnum;
+ }
+ llvm_unreachable("invalid TTK");
+}
+
/// \brief Determine whether a tag with a given kind is acceptable
/// as a redeclaration of the given tag declaration.
///
@@ -12226,28 +12668,6 @@ static bool isAcceptableTagRedeclContext(Sema &S, DeclContext *OldDC,
return false;
}
-/// Find the DeclContext in which a tag is implicitly declared if we see an
-/// elaborated type specifier in the specified context, and lookup finds
-/// nothing.
-static DeclContext *getTagInjectionContext(DeclContext *DC) {
- while (!DC->isFileContext() && !DC->isFunctionOrMethod())
- DC = DC->getParent();
- return DC;
-}
-
-/// Find the Scope in which a tag is implicitly declared if we see an
-/// elaborated type specifier in the specified context, and lookup finds
-/// nothing.
-static Scope *getTagInjectionScope(Scope *S, const LangOptions &LangOpts) {
- while (S->isClassScope() ||
- (LangOpts.CPlusPlus &&
- S->isFunctionPrototypeScope()) ||
- ((S->getFlags() & Scope::DeclScope) == 0) ||
- (S->getEntity() && S->getEntity()->isTransparentContext()))
- S = S->getParent();
- return S;
-}
-
/// \brief This is invoked when we see 'struct foo' or 'struct {'. In the
/// former case, Name will be non-null. In the later case, Name will be null.
/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
@@ -12361,6 +12781,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
DeclContext *SearchDC = CurContext;
DeclContext *DC = CurContext;
bool isStdBadAlloc = false;
+ bool isStdAlignValT = false;
RedeclarationKind Redecl = ForRedeclaration;
if (TUK == TUK_Friend || TUK == TUK_Reference)
@@ -12515,15 +12936,20 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
}
if (getLangOpts().CPlusPlus && Name && DC && StdNamespace &&
- DC->Equals(getStdNamespace()) && Name->isStr("bad_alloc")) {
- // This is a declaration of or a reference to "std::bad_alloc".
- isStdBadAlloc = true;
+ DC->Equals(getStdNamespace())) {
+ if (Name->isStr("bad_alloc")) {
+ // This is a declaration of or a reference to "std::bad_alloc".
+ isStdBadAlloc = true;
- if (Previous.empty() && StdBadAlloc) {
- // std::bad_alloc has been implicitly declared (but made invisible to
- // name lookup). Fill in this implicit declaration as the previous
+ // If std::bad_alloc has been implicitly declared (but made invisible to
+ // name lookup), fill in this implicit declaration as the previous
// declaration, so that the declarations get chained appropriately.
- Previous.addDecl(getStdBadAlloc());
+ if (Previous.empty() && StdBadAlloc)
+ Previous.addDecl(getStdBadAlloc());
+ } else if (Name->isStr("align_val_t")) {
+ isStdAlignValT = true;
+ if (Previous.empty() && StdAlignValT)
+ Previous.addDecl(getStdAlignValT());
}
}
@@ -12843,11 +13269,9 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// (non-redeclaration) lookup.
if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
!Previous.isForRedeclaration()) {
- unsigned Kind = 0;
- if (isa<TypedefDecl>(PrevDecl)) Kind = 1;
- else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2;
- else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3;
- Diag(NameLoc, diag::err_tag_reference_non_tag) << Kind;
+ NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
+ Diag(NameLoc, diag::err_tag_reference_non_tag) << PrevDecl << NTK
+ << Kind;
Diag(PrevDecl->getLocation(), diag::note_declared_at);
Invalid = true;
@@ -12858,11 +13282,8 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Diagnose implicit declarations introduced by elaborated types.
} else if (TUK == TUK_Reference || TUK == TUK_Friend) {
- unsigned Kind = 0;
- if (isa<TypedefDecl>(PrevDecl)) Kind = 1;
- else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2;
- else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3;
- Diag(NameLoc, diag::err_tag_reference_conflict) << Kind;
+ NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
+ Diag(NameLoc, diag::err_tag_reference_conflict) << NTK;
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
Invalid = true;
@@ -12915,6 +13336,10 @@ CreateNewDecl:
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name,
cast_or_null<EnumDecl>(PrevDecl), ScopedEnum,
ScopedEnumUsesClassTag, !EnumUnderlying.isNull());
+
+ if (isStdAlignValT && (!StdAlignValT || getStdAlignValT()->isImplicit()))
+ StdAlignValT = cast<EnumDecl>(New);
+
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
@@ -13047,7 +13472,6 @@ CreateNewDecl:
} else if (!PrevDecl) {
Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
}
- DeclsInPrototypeScope.push_back(New);
}
if (Invalid)
@@ -13112,7 +13536,14 @@ CreateNewDecl:
OwnedDecl = true;
// In C++, don't return an invalid declaration. We can't recover well from
// the cases where we make the type anonymous.
- return (Invalid && getLangOpts().CPlusPlus) ? nullptr : New;
+ if (Invalid && getLangOpts().CPlusPlus) {
+ if (New->isBeingDefined())
+ if (auto RD = dyn_cast<RecordDecl>(New))
+ RD->completeDefinition();
+ return nullptr;
+ } else {
+ return New;
+ }
}
void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
@@ -13347,6 +13778,13 @@ FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
Declarator &D, Expr *BitWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS) {
+ if (D.isDecompositionDeclarator()) {
+ const DecompositionDeclarator &Decomp = D.getDecompositionDeclarator();
+ Diag(Decomp.getLSquareLoc(), diag::err_decomp_decl_context)
+ << Decomp.getSourceRange();
+ return nullptr;
+ }
+
IdentifierInfo *II = D.getIdentifier();
SourceLocation Loc = DeclStart;
if (II) Loc = D.getIdentifierLoc();
@@ -14140,6 +14578,14 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
if (!Completed)
Record->completeDefinition();
+ // We may have deferred checking for a deleted destructor. Check now.
+ if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
+ auto *Dtor = CXXRecord->getDestructor();
+ if (Dtor && Dtor->isImplicit() &&
+ ShouldDeleteSpecialMember(Dtor, CXXDestructor))
+ SetDeclDeleted(Dtor, CXXRecord->getLocation());
+ }
+
if (Record->hasAttrs()) {
CheckAlignasUnderalignment(Record);
@@ -15054,15 +15500,97 @@ static void checkModuleImportContext(Sema &S, Module *M,
} else if (!M->IsExternC && ExternCLoc.isValid()) {
S.Diag(ImportLoc, diag::ext_module_import_in_extern_c)
<< M->getFullModuleName();
- S.Diag(ExternCLoc, diag::note_module_import_in_extern_c);
+ S.Diag(ExternCLoc, diag::note_extern_c_begins_here);
+ }
+}
+
+Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation ModuleLoc,
+ ModuleDeclKind MDK,
+ ModuleIdPath Path) {
+ // 'module implementation' requires that we are not compiling a module of any
+ // kind. 'module' and 'module partition' require that we are compiling a
+  // module interface (not a module map).
+ auto CMK = getLangOpts().getCompilingModule();
+ if (MDK == ModuleDeclKind::Implementation
+ ? CMK != LangOptions::CMK_None
+ : CMK != LangOptions::CMK_ModuleInterface) {
+ Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch)
+ << (unsigned)MDK;
+ return nullptr;
+ }
+
+ // FIXME: Create a ModuleDecl and return it.
+
+ // FIXME: Most of this work should be done by the preprocessor rather than
+ // here, in case we look ahead across something where the current
+  // module matters (e.g. a #include).
+
+  // In the Modules TS, the dots in a module name do not express a module
+  // hierarchy. Unlike Clang's hierarchical module map modules, a dot here is
+  // just another character that can appear in a module name. Flatten down to
+  // the actual module name.
+ std::string ModuleName;
+ for (auto &Piece : Path) {
+ if (!ModuleName.empty())
+ ModuleName += ".";
+ ModuleName += Piece.first->getName();
+ }
+
+ // If a module name was explicitly specified on the command line, it must be
+ // correct.
+ if (!getLangOpts().CurrentModule.empty() &&
+ getLangOpts().CurrentModule != ModuleName) {
+ Diag(Path.front().second, diag::err_current_module_name_mismatch)
+ << SourceRange(Path.front().second, Path.back().second)
+ << getLangOpts().CurrentModule;
+ return nullptr;
+ }
+ const_cast<LangOptions&>(getLangOpts()).CurrentModule = ModuleName;
+
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+
+ switch (MDK) {
+ case ModuleDeclKind::Module: {
+ // FIXME: Check we're not in a submodule.
+
+ // We can't have imported a definition of this module or parsed a module
+ // map defining it already.
+ if (auto *M = Map.findModule(ModuleName)) {
+ Diag(Path[0].second, diag::err_module_redefinition) << ModuleName;
+ if (M->DefinitionLoc.isValid())
+ Diag(M->DefinitionLoc, diag::note_prev_module_definition);
+ else if (const auto *FE = M->getASTFile())
+ Diag(M->DefinitionLoc, diag::note_prev_module_definition_from_ast_file)
+ << FE->getName();
+ return nullptr;
+ }
+
+ // Create a Module for the module that we're defining.
+ Module *Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName);
+ assert(Mod && "module creation should not fail");
+
+ // Enter the semantic scope of the module.
+ ActOnModuleBegin(ModuleLoc, Mod);
+ return nullptr;
+ }
+
+ case ModuleDeclKind::Partition:
+ // FIXME: Check we are in a submodule of the named module.
+ return nullptr;
+
+ case ModuleDeclKind::Implementation:
+ std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
+ PP.getIdentifierInfo(ModuleName), Path[0].second);
+
+ DeclResult Import = ActOnModuleImport(ModuleLoc, ModuleLoc, ModuleNameLoc);
+ if (Import.isInvalid())
+ return nullptr;
+ return ConvertDeclToDeclGroup(Import.get());
}
-}
-void Sema::diagnoseMisplacedModuleImport(Module *M, SourceLocation ImportLoc) {
- return checkModuleImportContext(*this, M, ImportLoc, CurContext);
+ llvm_unreachable("unexpected module decl kind");
}
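// Illustrative sketch (not part of the patch; the surface syntax is an
// assumption from the Modules TS draft): a module-interface unit whose name
// contains dots. The dots flatten into the single module name "foo.bar"
// rather than forming a Clang submodule "bar" of "foo".
//
//   module foo.bar;   // ModuleDeclKind::Module; requires CMK_ModuleInterface
//   int f();
//
// The same declaration in a compilation that is not building a module
// interface is rejected with err_module_interface_implementation_mismatch.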
-DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
+DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ImportLoc,
ModuleIdPath Path) {
Module *Mod =
@@ -15078,8 +15606,11 @@ DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
// FIXME: we should support importing a submodule within a different submodule
// of the same top-level module. Until we do, make it an error rather than
// silently ignoring the import.
- if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule)
- Diag(ImportLoc, getLangOpts().CompilingModule
+ // Import-from-implementation is valid in the Modules TS. FIXME: Should we
+ // warn on a redundant import of the current module?
+ if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule &&
+ (getLangOpts().isCompilingModule() || !getLangOpts().ModulesTS))
+ Diag(ImportLoc, getLangOpts().isCompilingModule()
? diag::err_module_self_import
: diag::err_module_import_in_implementation)
<< Mod->getFullModuleName() << getLangOpts().CurrentModule;
@@ -15096,17 +15627,21 @@ DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
IdentifierLocs.push_back(Path[I].second);
}
- ImportDecl *Import = ImportDecl::Create(Context,
- Context.getTranslationUnitDecl(),
- AtLoc.isValid()? AtLoc : ImportLoc,
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
+ ImportDecl *Import = ImportDecl::Create(Context, TU, StartLoc,
Mod, IdentifierLocs);
- Context.getTranslationUnitDecl()->addDecl(Import);
+ if (!ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, Import);
+ TU->addDecl(Import);
return Import;
}
void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
+ BuildModuleInclude(DirectiveLoc, Mod);
+}
+void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
// Determine whether we're in the #include buffer for a module. The #includes
// in that buffer do not qualify as module imports; they're just an
// implementation detail of us building the module.
@@ -15116,13 +15651,7 @@ void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
TUKind == TU_Module &&
getSourceManager().isWrittenInMainFile(DirectiveLoc);
- // Similarly, if we're in the implementation of a module, don't
- // synthesize an illegal module import. FIXME: Why not?
- bool ShouldAddImport =
- !IsInModuleIncludes &&
- (getLangOpts().CompilingModule ||
- getLangOpts().CurrentModule.empty() ||
- getLangOpts().CurrentModule != Mod->getTopLevelModuleName());
+ bool ShouldAddImport = !IsInModuleIncludes;
// If this module import was due to an inclusion directive, create an
// implicit import declaration to capture it in the AST.
@@ -15131,6 +15660,8 @@ void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
ImportDecl *ImportD = ImportDecl::CreateImplicit(getASTContext(), TU,
DirectiveLoc, Mod,
DirectiveLoc);
+ if (!ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, ImportD);
TU->addDecl(ImportD);
Consumer.HandleImplicitImportDecl(ImportD);
}
@@ -15140,24 +15671,35 @@ void Sema::ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
}
void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
- checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext);
+ checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext, true);
+ ModuleScopes.push_back({});
+ ModuleScopes.back().Module = Mod;
if (getLangOpts().ModulesLocalVisibility)
- VisibleModulesStack.push_back(std::move(VisibleModules));
+ ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules);
+
VisibleModules.setVisible(Mod, DirectiveLoc);
}
-void Sema::ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod) {
- checkModuleImportContext(*this, Mod, DirectiveLoc, CurContext);
-
+void Sema::ActOnModuleEnd(SourceLocation EofLoc, Module *Mod) {
if (getLangOpts().ModulesLocalVisibility) {
- VisibleModules = std::move(VisibleModulesStack.back());
- VisibleModulesStack.pop_back();
- VisibleModules.setVisible(Mod, DirectiveLoc);
+ VisibleModules = std::move(ModuleScopes.back().OuterVisibleModules);
// Leaving a module hides namespace names, so our visible namespace cache
// is now out of date.
VisibleNamespaceCache.clear();
}
+
+ assert(!ModuleScopes.empty() && ModuleScopes.back().Module == Mod &&
+ "left the wrong module scope");
+ ModuleScopes.pop_back();
+
+ // We got to the end of processing a #include of a local module. Create an
+ // ImportDecl as we would for an imported module.
+ FileID File = getSourceManager().getFileID(EofLoc);
+ assert(File != getSourceManager().getMainFileID() &&
+ "end of submodule in main source file");
+ SourceLocation DirectiveLoc = getSourceManager().getIncludeLoc(File);
+ BuildModuleInclude(DirectiveLoc, Mod);
}
void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
@@ -15178,6 +15720,39 @@ void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
VisibleModules.setVisible(Mod, Loc);
}
+/// We have parsed the start of an export declaration, including the '{'
+/// (if present).
+Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
+ SourceLocation LBraceLoc) {
+ ExportDecl *D = ExportDecl::Create(Context, CurContext, ExportLoc);
+
+ // C++ Modules TS draft:
+ // An export-declaration [...] shall not contain more than one
+ // export keyword.
+ //
+ // The intent here is that an export-declaration cannot appear within another
+ // export-declaration.
+ if (D->isExported())
+ Diag(ExportLoc, diag::err_export_within_export);
+
+ CurContext->addDecl(D);
+ PushDeclContext(S, D);
+ return D;
+}
+
+/// Complete the definition of an export declaration.
+Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
+ auto *ED = cast<ExportDecl>(D);
+ if (RBraceLoc.isValid())
+ ED->setRBraceLoc(RBraceLoc);
+
+ // FIXME: Diagnose export of internal-linkage declaration (including
+ // anonymous namespace).
+
+ PopDeclContext();
+ return D;
+}
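// Hedged sketch of the export-declaration handling above (Modules TS syntax
// as parsed here; the example is illustrative):
//
//   export { int f(); }    // ActOnStartExportDecl .. ActOnFinishExportDecl
//   export {
//     export int g();      // rejected: err_export_within_export
//   }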
+
void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
@@ -15239,29 +15814,3 @@ void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
Decl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
-
-AvailabilityResult Sema::getCurContextAvailability() const {
- const Decl *D = cast_or_null<Decl>(getCurObjCLexicalContext());
- if (!D)
- return AR_Available;
-
- // If we are within an Objective-C method, we should consult
- // both the availability of the method as well as the
- // enclosing class. If the class is (say) deprecated,
- // the entire method is considered deprecated from the
- // purpose of checking if the current context is deprecated.
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- AvailabilityResult R = MD->getAvailability();
- if (R != AR_Available)
- return R;
- D = MD->getClassInterface();
- }
- // If we are within an Objective-c @implementation, it
- // gets the same availability context as the @interface.
- else if (const ObjCImplementationDecl *ID =
- dyn_cast<ObjCImplementationDecl>(D)) {
- D = ID->getClassInterface();
- }
- // Recover from user error.
- return D ? D->getAvailability() : AR_Available;
-}
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index a5780a7d71fb..f9b6a91a300f 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
@@ -21,7 +21,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
-#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
@@ -31,6 +31,7 @@
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
@@ -245,6 +246,28 @@ static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
return true;
}
+/// \brief Wrapper around checkUInt32Argument, with an extra check to be sure
+/// that the result will fit into a regular (signed) int. All args have the same
+/// purpose as they do in checkUInt32Argument.
+static bool checkPositiveIntArgument(Sema &S, const AttributeList &Attr,
+ const Expr *Expr, int &Val,
+ unsigned Idx = UINT_MAX) {
+ uint32_t UVal;
+ if (!checkUInt32Argument(S, Attr, Expr, UVal, Idx))
+ return false;
+
+ if (UVal > (uint32_t)std::numeric_limits<int>::max()) {
+ llvm::APSInt I(32); // for toString
+ I = UVal;
+ S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
+ << I.toString(10, false) << 32 << /* Unsigned */ 0;
+ return false;
+ }
+
+ Val = UVal;
+ return true;
+}
+
/// \brief Diagnose mutually exclusive attributes when present on a given
/// declaration. Returns true if diagnosed.
template <typename AttrTy>
@@ -729,6 +752,69 @@ static void handleAssertExclusiveLockAttr(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
+/// \brief Checks that the given parameter number is in bounds and that the
+/// parameter is of some integral type. Will emit appropriate diagnostics if this returns
+/// false.
+///
+/// FuncParamNo is expected to be from the user, so is base-1. AttrArgNo is used
+/// to actually retrieve the argument, so it's base-0.
+static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
+ const AttributeList &Attr,
+ unsigned FuncParamNo, unsigned AttrArgNo) {
+ assert(Attr.isArgExpr(AttrArgNo) && "Expected expression argument");
+ uint64_t Idx;
+ if (!checkFunctionOrMethodParameterIndex(S, FD, Attr, FuncParamNo,
+ Attr.getArgAsExpr(AttrArgNo), Idx))
+ return false;
+
+ const ParmVarDecl *Param = FD->getParamDecl(Idx);
+ if (!Param->getType()->isIntegerType() && !Param->getType()->isCharType()) {
+ SourceLocation SrcLoc = Attr.getArgAsExpr(AttrArgNo)->getLocStart();
+ S.Diag(SrcLoc, diag::err_attribute_integers_only)
+ << Attr.getName() << Param->getSourceRange();
+ return false;
+ }
+ return true;
+}
+
+static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1) ||
+ !checkAttributeAtMostNumArgs(S, Attr, 2))
+ return;
+
+ const auto *FD = cast<FunctionDecl>(D);
+ if (!FD->getReturnType()->isPointerType()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_return_pointers_only)
+ << Attr.getName();
+ return;
+ }
+
+ const Expr *SizeExpr = Attr.getArgAsExpr(0);
+ int SizeArgNo;
+ // Parameter indices are 1-based, hence Index=1
+ if (!checkPositiveIntArgument(S, Attr, SizeExpr, SizeArgNo, /*Index=*/1))
+ return;
+
+ if (!checkParamIsIntegerType(S, FD, Attr, SizeArgNo, /*AttrArgNo=*/0))
+ return;
+
+ // Args are 1-indexed, so 0 implies that the arg was not present
+ int NumberArgNo = 0;
+ if (Attr.getNumArgs() == 2) {
+ const Expr *NumberExpr = Attr.getArgAsExpr(1);
+ // Parameter indices are 1-based, hence Index=2
+ if (!checkPositiveIntArgument(S, Attr, NumberExpr, NumberArgNo,
+ /*Index=*/2))
+ return;
+
+ if (!checkParamIsIntegerType(S, FD, Attr, NumberArgNo, /*AttrArgNo=*/1))
+ return;
+ }
+
+ D->addAttr(::new (S.Context) AllocSizeAttr(
+ Attr.getRange(), S.Context, SizeArgNo, NumberArgNo,
+ Attr.getAttributeSpellingListIndex()));
+}
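// Usage sketch for the handler above (alloc_size is the existing GCC-style
// attribute; the declarations are illustrative). Indices are 1-based, must
// name integer parameters, and the function must return a pointer:
void *my_malloc(unsigned long size) __attribute__((alloc_size(1)));
void *my_calloc(unsigned long nmemb, unsigned long size)
    __attribute__((alloc_size(1, 2)));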
static bool checkTryLockFunAttrCommon(Sema &S, Decl *D,
const AttributeList &Attr,
@@ -824,8 +910,8 @@ static void handleEnableIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
!Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(D),
Diags)) {
S.Diag(Attr.getLoc(), diag::err_enable_if_never_constant_expr);
- for (int I = 0, N = Diags.size(); I != N; ++I)
- S.Diag(Diags[I].first, Diags[I].second);
+ for (const PartialDiagnosticAt &PDiag : Diags)
+ S.Diag(PDiag.first, PDiag.second);
return;
}
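// For reference, a minimal use of Clang's enable_if attribute (sketch): the
// condition must be constant-foldable from the call-site arguments, or the
// err_enable_if_never_constant_expr path above fires.
void positive_only(int n)
    __attribute__((enable_if(n > 0, "n must be positive")));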
@@ -2803,20 +2889,21 @@ enum FormatAttrKind {
/// types.
static FormatAttrKind getFormatAttrKind(StringRef Format) {
return llvm::StringSwitch<FormatAttrKind>(Format)
- // Check for formats that get handled specially.
- .Case("NSString", NSStringFormat)
- .Case("CFString", CFStringFormat)
- .Case("strftime", StrftimeFormat)
+ // Check for formats that get handled specially.
+ .Case("NSString", NSStringFormat)
+ .Case("CFString", CFStringFormat)
+ .Case("strftime", StrftimeFormat)
- // Otherwise, check for supported formats.
- .Cases("scanf", "printf", "printf0", "strfmon", SupportedFormat)
- .Cases("cmn_err", "vcmn_err", "zcmn_err", SupportedFormat)
- .Case("kprintf", SupportedFormat) // OpenBSD.
- .Case("freebsd_kprintf", SupportedFormat) // FreeBSD.
- .Case("os_trace", SupportedFormat)
+ // Otherwise, check for supported formats.
+ .Cases("scanf", "printf", "printf0", "strfmon", SupportedFormat)
+ .Cases("cmn_err", "vcmn_err", "zcmn_err", SupportedFormat)
+ .Case("kprintf", SupportedFormat) // OpenBSD.
+ .Case("freebsd_kprintf", SupportedFormat) // FreeBSD.
+ .Case("os_trace", SupportedFormat)
+ .Case("os_log", SupportedFormat)
- .Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", IgnoredFormat)
- .Default(InvalidFormat);
+ .Cases("gcc_diag", "gcc_cdiag", "gcc_cxxdiag", "gcc_tdiag", IgnoredFormat)
+ .Default(InvalidFormat);
}
/// Handle __attribute__((init_priority(priority))) attributes based on
@@ -3043,10 +3130,14 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D,
return;
}
+ if (FirstType->isIncompleteType())
+ return;
uint64_t FirstSize = S.Context.getTypeSize(FirstType);
uint64_t FirstAlign = S.Context.getTypeAlign(FirstType);
for (; Field != FieldEnd; ++Field) {
QualType FieldType = Field->getType();
+ if (FieldType->isIncompleteType())
+ return;
// FIXME: this isn't fully correct; we also need to test whether the
// members of the union would all have the same calling convention as the
// first member of the union. Checking just the size and alignment isn't
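// The early returns added above skip the size/alignment checks when a member
// type is incomplete, presumably to avoid querying the size of a type that
// has none (an assumption; the patch context does not say), e.g. in this
// already-invalid C sketch:
//
//   union __attribute__((transparent_union)) U {
//     struct Incomplete q;   // invalid field; attribute checking bails out
//   };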
@@ -3695,6 +3786,38 @@ static void handleOptimizeNoneAttr(Sema &S, Decl *D,
D->addAttr(Optnone);
}
+static void handleConstantAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+ auto *VD = cast<VarDecl>(D);
+ if (!VD->hasGlobalStorage()) {
+ S.Diag(Attr.getLoc(), diag::err_cuda_nonglobal_constant);
+ return;
+ }
+ D->addAttr(::new (S.Context) CUDAConstantAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleSharedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, Attr.getRange(),
+ Attr.getName()))
+ return;
+ auto *VD = cast<VarDecl>(D);
+ // extern __shared__ is only allowed on arrays with no length (e.g.
+ // "int x[]").
+ if (VD->hasExternalStorage() && !isa<IncompleteArrayType>(VD->getType())) {
+ S.Diag(Attr.getLoc(), diag::err_cuda_extern_shared) << VD;
+ return;
+ }
+ if (S.getLangOpts().CUDA && VD->hasLocalStorage() &&
+ S.CUDADiagIfHostCode(Attr.getLoc(), diag::err_cuda_host_shared)
+ << S.CurrentCUDATarget())
+ return;
+ D->addAttr(::new (S.Context) CUDASharedAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+}
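// CUDA sketch for the two handlers above (illustrative; __constant__ and
// __shared__ are the usual CUDA spellings of these attributes):
//
//   __constant__ int table[64];        // OK: global storage
//   __device__ void f() {
//     __shared__ int tile[256];        // OK in device code
//   }
//   extern __shared__ int dyn_smem[];  // OK: extern requires an unsized array
//   extern __shared__ int bad;         // rejected: err_cuda_extern_shared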
+
static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (checkAttrMutualExclusion<CUDADeviceAttr>(S, D, Attr.getRange(),
Attr.getName()) ||
@@ -3801,6 +3924,10 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
SysVABIAttr(Attr.getRange(), S.Context,
Attr.getAttributeSpellingListIndex()));
return;
+ case AttributeList::AT_RegCall:
+ D->addAttr(::new (S.Context) RegCallAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
+ return;
case AttributeList::AT_Pcs: {
PcsAttr::PCSType PCS;
switch (CC) {
@@ -3862,6 +3989,7 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
case AttributeList::AT_Pascal: CC = CC_X86Pascal; break;
case AttributeList::AT_SwiftCall: CC = CC_Swift; break;
case AttributeList::AT_VectorCall: CC = CC_X86VectorCall; break;
+ case AttributeList::AT_RegCall: CC = CC_X86RegCall; break;
case AttributeList::AT_MSABI:
CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_C :
CC_X86_64Win64;
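// Sketch of the new spelling wired up above (regcall is an x86 calling
// convention; the GNU attribute spelling is shown, a __regcall keyword
// spelling is assumed to exist as well):
//
//   void __attribute__((regcall)) hot_path(int a, int b);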
@@ -4603,6 +4731,19 @@ static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
// Microsoft specific attribute handlers.
//===----------------------------------------------------------------------===//
+UuidAttr *Sema::mergeUuidAttr(Decl *D, SourceRange Range,
+ unsigned AttrSpellingListIndex, StringRef Uuid) {
+ if (const auto *UA = D->getAttr<UuidAttr>()) {
+ if (UA->getGuid().equals_lower(Uuid))
+ return nullptr;
+ Diag(UA->getLocation(), diag::err_mismatched_uuid);
+ Diag(Range.getBegin(), diag::note_previous_uuid);
+ D->dropAttr<UuidAttr>();
+ }
+
+ return ::new (Context) UuidAttr(Range, Context, Uuid, AttrSpellingListIndex);
+}
+
static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!S.LangOpts.CPlusPlus) {
S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_in_lang)
@@ -4610,12 +4751,6 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
- if (!isa<CXXRecordDecl>(D)) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << ExpectedClass;
- return;
- }
-
StringRef StrRef;
SourceLocation LiteralLoc;
if (!S.checkStringLiteralArgumentAttr(Attr, 0, StrRef, &LiteralLoc))
@@ -4644,8 +4779,10 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
}
- D->addAttr(::new (S.Context) UuidAttr(Attr.getRange(), S.Context, StrRef,
- Attr.getAttributeSpellingListIndex()));
+ UuidAttr *UA = S.mergeUuidAttr(D, Attr.getRange(),
+ Attr.getAttributeSpellingListIndex(), StrRef);
+ if (UA)
+ D->addAttr(UA);
}
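// Sketch of the merge behavior added above (MS __declspec syntax; GUID
// strings compare case-insensitively, so only the third declaration errors):
//
//   struct __declspec(uuid("6d5140c1-7436-11ce-8034-00aa006009fa")) S;
//   struct __declspec(uuid("6D5140C1-7436-11CE-8034-00AA006009FA")) S; // OK
//   struct __declspec(uuid("00000000-0000-0000-0000-000000000000")) S;
//       // rejected: err_mismatched_uuid, with note_previous_uuid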
static void handleMSInheritanceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -4925,29 +5062,85 @@ static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
}
-static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D,
+static void handleAMDGPUFlatWorkGroupSizeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint32_t Min = 0;
+ Expr *MinExpr = Attr.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, Attr, MinExpr, Min))
+ return;
+
+ uint32_t Max = 0;
+ Expr *MaxExpr = Attr.getArgAsExpr(1);
+ if (!checkUInt32Argument(S, Attr, MaxExpr, Max))
+ return;
+
+ if (Min == 0 && Max != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
+ << Attr.getName() << 0;
+ return;
+ }
+ if (Min > Max) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ AMDGPUFlatWorkGroupSizeAttr(Attr.getLoc(), S.Context, Min, Max,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint32_t Min = 0;
+ Expr *MinExpr = Attr.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, Attr, MinExpr, Min))
+ return;
+
+ uint32_t Max = 0;
+ if (Attr.getNumArgs() == 2) {
+ Expr *MaxExpr = Attr.getArgAsExpr(1);
+ if (!checkUInt32Argument(S, Attr, MaxExpr, Max))
+ return;
+ }
+
+ if (Min == 0 && Max != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
+ << Attr.getName() << 0;
+ return;
+ }
+ if (Max != 0 && Min > Max) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_invalid)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ D->addAttr(::new (S.Context)
+ AMDGPUWavesPerEUAttr(Attr.getLoc(), S.Context, Min, Max,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- uint32_t NumRegs;
- Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
- if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs))
+ uint32_t NumSGPR = 0;
+ Expr *NumSGPRExpr = Attr.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, Attr, NumSGPRExpr, NumSGPR))
return;
D->addAttr(::new (S.Context)
- AMDGPUNumVGPRAttr(Attr.getLoc(), S.Context,
- NumRegs,
+ AMDGPUNumSGPRAttr(Attr.getLoc(), S.Context, NumSGPR,
Attr.getAttributeSpellingListIndex()));
}
-static void handleAMDGPUNumSGPRAttr(Sema &S, Decl *D,
+static void handleAMDGPUNumVGPRAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
- uint32_t NumRegs;
- Expr *NumRegsExpr = static_cast<Expr *>(Attr.getArgAsExpr(0));
- if (!checkUInt32Argument(S, Attr, NumRegsExpr, NumRegs))
+ uint32_t NumVGPR = 0;
+ Expr *NumVGPRExpr = Attr.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, Attr, NumVGPRExpr, NumVGPR))
return;
D->addAttr(::new (S.Context)
- AMDGPUNumSGPRAttr(Attr.getLoc(), S.Context,
- NumRegs,
+ AMDGPUNumVGPRAttr(Attr.getLoc(), S.Context, NumVGPR,
Attr.getAttributeSpellingListIndex()));
}
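// Usage sketch for the AMDGPU attributes handled above (spellings per the
// handlers; the values are illustrative, and the checks later in this file
// restrict these attributes to kernel functions):
//
//   __attribute__((amdgpu_flat_work_group_size(64, 256),
//                  amdgpu_waves_per_eu(2, 4),
//                  amdgpu_num_sgpr(32), amdgpu_num_vgpr(64)))
//   kernel void k();
//
// A (min, max) pair with min == 0 but max != 0, or with min > max, is
// rejected via err_attribute_argument_invalid.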
@@ -5205,9 +5398,15 @@ static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
!(Attr.hasScope() && Attr.getScopeName()->isStr("gnu")))
S.Diag(Attr.getLoc(), diag::ext_cxx14_attr) << Attr.getName();
- D->addAttr(::new (S.Context) DeprecatedAttr(Attr.getRange(), S.Context, Str,
- Replacement,
- Attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context)
+ DeprecatedAttr(Attr.getRange(), S.Context, Str, Replacement,
+ Attr.getAttributeSpellingListIndex()));
+}
+
+static bool isGlobalVar(const Decl *D) {
+ if (const auto *S = dyn_cast<VarDecl>(D))
+ return S->hasGlobalStorage();
+ return false;
}
static void handleNoSanitizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
@@ -5225,7 +5424,9 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) == 0)
S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
-
+ else if (isGlobalVar(D) && SanitizerName != "address")
+ S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
Sanitizers.push_back(SanitizerName);
}
@@ -5238,12 +5439,14 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
StringRef AttrName = Attr.getName()->getName();
normalizeName(AttrName);
- StringRef SanitizerName =
- llvm::StringSwitch<StringRef>(AttrName)
- .Case("no_address_safety_analysis", "address")
- .Case("no_sanitize_address", "address")
- .Case("no_sanitize_thread", "thread")
- .Case("no_sanitize_memory", "memory");
+ StringRef SanitizerName = llvm::StringSwitch<StringRef>(AttrName)
+ .Case("no_address_safety_analysis", "address")
+ .Case("no_sanitize_address", "address")
+ .Case("no_sanitize_thread", "thread")
+ .Case("no_sanitize_memory", "memory");
+ if (isGlobalVar(D) && SanitizerName != "address")
+ S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
D->addAttr(::new (S.Context)
NoSanitizeAttr(Attr.getRange(), S.Context, &SanitizerName, 1,
Attr.getAttributeSpellingListIndex()));
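// Sketch of the new restriction on globals (only the address sanitizer may
// be suppressed on a variable; the declarations are illustrative):
__attribute__((no_sanitize("address"))) int g_ok;   // OK
__attribute__((no_sanitize("thread"))) int g_bad;   // err_attribute_wrong_decl_type
__attribute__((no_sanitize("thread"))) void f();    // OK on functions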
@@ -5401,12 +5604,18 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_NoMips16:
handleSimpleAttribute<NoMips16Attr>(S, D, Attr);
break;
- case AttributeList::AT_AMDGPUNumVGPR:
- handleAMDGPUNumVGPRAttr(S, D, Attr);
+ case AttributeList::AT_AMDGPUFlatWorkGroupSize:
+ handleAMDGPUFlatWorkGroupSizeAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_AMDGPUWavesPerEU:
+ handleAMDGPUWavesPerEUAttr(S, D, Attr);
break;
case AttributeList::AT_AMDGPUNumSGPR:
handleAMDGPUNumSGPRAttr(S, D, Attr);
break;
+ case AttributeList::AT_AMDGPUNumVGPR:
+ handleAMDGPUNumVGPRAttr(S, D, Attr);
+ break;
case AttributeList::AT_IBAction:
handleSimpleAttribute<IBActionAttr>(S, D, Attr);
break;
@@ -5428,6 +5637,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_AlignValue:
handleAlignValueAttr(S, D, Attr);
break;
+ case AttributeList::AT_AllocSize:
+ handleAllocSizeAttr(S, D, Attr);
+ break;
case AttributeList::AT_AlwaysInline:
handleAlwaysInlineAttr(S, D, Attr);
break;
@@ -5450,8 +5662,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleCommonAttr(S, D, Attr);
break;
case AttributeList::AT_CUDAConstant:
- handleSimpleAttributeWithExclusions<CUDAConstantAttr, CUDASharedAttr>(S, D,
- Attr);
+ handleConstantAttr(S, D, Attr);
break;
case AttributeList::AT_PassObjectSize:
handlePassObjectSizeAttr(S, D, Attr);
@@ -5561,8 +5772,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleSimpleAttribute<NoThrowAttr>(S, D, Attr);
break;
case AttributeList::AT_CUDAShared:
- handleSimpleAttributeWithExclusions<CUDASharedAttr, CUDAConstantAttr>(S, D,
- Attr);
+ handleSharedAttr(S, D, Attr);
break;
case AttributeList::AT_VecReturn:
handleVecReturnAttr(S, D, Attr);
@@ -5629,6 +5839,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_VecTypeHint:
handleVecTypeHint(S, D, Attr);
break;
+ case AttributeList::AT_RequireConstantInit:
+ handleSimpleAttribute<RequireConstantInitAttr>(S, D, Attr);
+ break;
case AttributeList::AT_InitPriority:
handleInitPriorityAttr(S, D, Attr);
break;
@@ -5650,6 +5863,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ObjCRootClass:
handleSimpleAttribute<ObjCRootClassAttr>(S, D, Attr);
break;
+ case AttributeList::AT_ObjCSubclassingRestricted:
+ handleSimpleAttribute<ObjCSubclassingRestrictedAttr>(S, D, Attr);
+ break;
case AttributeList::AT_ObjCExplicitProtocolImpl:
handleObjCSuppresProtocolAttr(S, D, Attr);
break;
@@ -5728,6 +5944,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_NoDuplicate:
handleSimpleAttribute<NoDuplicateAttr>(S, D, Attr);
break;
+ case AttributeList::AT_Convergent:
+ handleSimpleAttribute<ConvergentAttr>(S, D, Attr);
+ break;
case AttributeList::AT_NoInline:
handleSimpleAttribute<NoInlineAttr>(S, D, Attr);
break;
@@ -5739,6 +5958,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_FastCall:
case AttributeList::AT_ThisCall:
case AttributeList::AT_Pascal:
+ case AttributeList::AT_RegCall:
case AttributeList::AT_SwiftCall:
case AttributeList::AT_VectorCall:
case AttributeList::AT_MSABI:
@@ -5955,7 +6175,11 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
} else if (Attr *A = D->getAttr<VecTypeHintAttr>()) {
Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
D->setInvalidDecl();
- } else if (Attr *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
+ } else if (Attr *A = D->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<AMDGPUWavesPerEUAttr>()) {
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
<< A << ExpectedKernelFunction;
D->setInvalidDecl();
@@ -5963,6 +6187,10 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
<< A << ExpectedKernelFunction;
D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<AMDGPUNumVGPRAttr>()) {
+ Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
+ << A << ExpectedKernelFunction;
+ D->setInvalidDecl();
}
}
}
@@ -6194,30 +6422,6 @@ static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
diag.Triggered = true;
}
-static bool isDeclDeprecated(Decl *D) {
- do {
- if (D->isDeprecated())
- return true;
- // A category implicitly has the availability of the interface.
- if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
- if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
- return Interface->isDeprecated();
- } while ((D = cast_or_null<Decl>(D->getDeclContext())));
- return false;
-}
-
-static bool isDeclUnavailable(Decl *D) {
- do {
- if (D->isUnavailable())
- return true;
- // A category implicitly has the availability of the interface.
- if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
- if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
- return Interface->isUnavailable();
- } while ((D = cast_or_null<Decl>(D->getDeclContext())));
- return false;
-}
-
static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
const Decl *D) {
// Check each AvailabilityAttr to find the one for this platform.
@@ -6246,7 +6450,72 @@ static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
return nullptr;
}
-static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
+/// \brief Determine whether we should emit a diagnostic for \c K and \c DeclVersion in
+/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
+/// in a deprecated context, but not the other way around.
+static bool ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
+ VersionTuple DeclVersion,
+ Decl *Ctx) {
+ assert(K != AR_Available && "Expected an unavailable declaration here!");
+
+ // Checks if we should emit the availability diagnostic in the context of C.
+ auto CheckContext = [&](const Decl *C) {
+ if (K == AR_NotYetIntroduced) {
+ if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, C))
+ if (AA->getIntroduced() >= DeclVersion)
+ return true;
+ } else if (K == AR_Deprecated)
+ if (C->isDeprecated())
+ return true;
+
+ if (C->isUnavailable())
+ return true;
+ return false;
+ };
+
+ // FIXME: This is a temporary workaround! Some existing Apple headers depend
+ // on nested declarations in an @interface having the availability of the
+ // interface when they really shouldn't: they are members of the enclosing
+ // context, and can be referenced from there.
+ if (S.OriginalLexicalContext && cast<Decl>(S.OriginalLexicalContext) != Ctx) {
+ auto *OrigCtx = cast<Decl>(S.OriginalLexicalContext);
+ if (CheckContext(OrigCtx))
+ return false;
+
+ // An implementation implicitly has the availability of the interface.
+ if (auto *CatOrImpl = dyn_cast<ObjCImplDecl>(OrigCtx)) {
+ if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ }
+ // A category implicitly has the availability of the interface.
+ else if (auto *CatD = dyn_cast<ObjCCategoryDecl>(OrigCtx))
+ if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ }
+
+ do {
+ if (CheckContext(Ctx))
+ return false;
+
+ // An implementation implicitly has the availability of the interface.
+ if (auto *CatOrImpl = dyn_cast<ObjCImplDecl>(Ctx)) {
+ if (const ObjCInterfaceDecl *Interface = CatOrImpl->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ }
+ // A category implicitly has the availability of the interface.
+ else if (auto *CatD = dyn_cast<ObjCCategoryDecl>(Ctx))
+ if (const ObjCInterfaceDecl *Interface = CatD->getClassInterface())
+ if (CheckContext(Interface))
+ return false;
+ } while ((Ctx = cast_or_null<Decl>(Ctx->getDeclContext())));
+
+ return true;
+}
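// Behavior sketch: a use is not diagnosed when its context is at least as
// unavailable as the referenced declaration (illustrative):
__attribute__((deprecated)) void old_api();
__attribute__((deprecated)) void also_old() {
  old_api();   // suppressed: the enclosing function is deprecated too
}
void fresh() {
  old_api();   // warning: 'old_api' is deprecated
}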
+
+static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
Decl *Ctx, const NamedDecl *D,
StringRef Message, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
@@ -6262,11 +6531,15 @@ static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
// Matches diag::note_availability_specified_here.
unsigned available_here_select_kind;
- // Don't warn if our current context is deprecated or unavailable.
+ VersionTuple DeclVersion;
+ if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, D))
+ DeclVersion = AA->getIntroduced();
+
+ if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx))
+ return;
+
switch (K) {
- case Sema::AD_Deprecation:
- if (isDeclDeprecated(Ctx) || isDeclUnavailable(Ctx))
- return;
+ case AR_Deprecated:
diag = !ObjCPropertyAccess ? diag::warn_deprecated
: diag::warn_property_method_deprecated;
diag_message = diag::warn_deprecated_message;
@@ -6275,9 +6548,7 @@ static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
available_here_select_kind = /* deprecated */ 2;
break;
- case Sema::AD_Unavailable:
- if (isDeclUnavailable(Ctx))
- return;
+ case AR_Unavailable:
diag = !ObjCPropertyAccess ? diag::err_unavailable
: diag::err_property_method_unavailable;
diag_message = diag::err_unavailable_message;
@@ -6329,18 +6600,21 @@ static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
}
break;
- case Sema::AD_Partial:
+ case AR_NotYetIntroduced:
diag = diag::warn_partial_availability;
diag_message = diag::warn_partial_message;
diag_fwdclass_message = diag::warn_partial_fwdclass_message;
property_note_select = /* partial */ 2;
available_here_select_kind = /* partial */ 3;
break;
+
+ case AR_Available:
+ llvm_unreachable("Warning for availability of available declaration?");
}
CharSourceRange UseRange;
StringRef Replacement;
- if (K == Sema::AD_Deprecation) {
+ if (K == AR_Deprecated) {
if (auto attr = D->getAttr<DeprecatedAttr>())
Replacement = attr->getReplacement();
if (auto attr = getAttrForPlatform(S.Context, D))
@@ -6393,21 +6667,20 @@ static void DoEmitAvailabilityWarning(Sema &S, Sema::AvailabilityDiagnostic K,
S.Diag(D->getLocation(), diag_available_here)
<< D << available_here_select_kind;
- if (K == Sema::AD_Partial)
+ if (K == AR_NotYetIntroduced)
S.Diag(Loc, diag::note_partial_availability_silence) << D;
}
static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
Decl *Ctx) {
- assert(DD.Kind == DelayedDiagnostic::Deprecation ||
- DD.Kind == DelayedDiagnostic::Unavailable);
- Sema::AvailabilityDiagnostic AD = DD.Kind == DelayedDiagnostic::Deprecation
- ? Sema::AD_Deprecation
- : Sema::AD_Unavailable;
+ assert(DD.Kind == DelayedDiagnostic::Availability &&
+ "Expected an availability diagnostic here");
+
DD.Triggered = true;
DoEmitAvailabilityWarning(
- S, AD, Ctx, DD.getDeprecationDecl(), DD.getDeprecationMessage(), DD.Loc,
- DD.getUnknownObjCClass(), DD.getObjCProperty(), false);
+ S, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityDecl(),
+ DD.getAvailabilityMessage(), DD.Loc, DD.getUnknownObjCClass(),
+ DD.getObjCProperty(), false);
}
void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
@@ -6437,8 +6710,7 @@ void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
continue;
switch (diag.Kind) {
- case DelayedDiagnostic::Deprecation:
- case DelayedDiagnostic::Unavailable:
+ case DelayedDiagnostic::Availability:
// Don't bother giving deprecation/unavailable diagnostics if
// the decl is invalid.
if (!decl->isInvalidDecl())
@@ -6466,21 +6738,173 @@ void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
curPool->steal(pool);
}
-void Sema::EmitAvailabilityWarning(AvailabilityDiagnostic AD,
+void Sema::EmitAvailabilityWarning(AvailabilityResult AR,
NamedDecl *D, StringRef Message,
SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
bool ObjCPropertyAccess) {
// Delay if we're currently parsing a declaration.
- if (DelayedDiagnostics.shouldDelayDiagnostics() && AD != AD_Partial) {
+ if (DelayedDiagnostics.shouldDelayDiagnostics()) {
DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability(
- AD, Loc, D, UnknownObjCClass, ObjCProperty, Message,
+ AR, Loc, D, UnknownObjCClass, ObjCProperty, Message,
ObjCPropertyAccess));
return;
}
Decl *Ctx = cast<Decl>(getCurLexicalContext());
- DoEmitAvailabilityWarning(*this, AD, Ctx, D, Message, Loc, UnknownObjCClass,
+ DoEmitAvailabilityWarning(*this, AR, Ctx, D, Message, Loc, UnknownObjCClass,
ObjCProperty, ObjCPropertyAccess);
}
+
+namespace {
+
+/// \brief This class implements -Wunguarded-availability.
+///
+/// This is done with a traversal of the AST of a function that makes reference
+/// to a partially available declaration. Whenever we encounter an \c if of the
+/// form: \c if(@available(...)), we use the version from the condition to visit
+/// the then statement.
+class DiagnoseUnguardedAvailability
+ : public RecursiveASTVisitor<DiagnoseUnguardedAvailability> {
+ typedef RecursiveASTVisitor<DiagnoseUnguardedAvailability> Base;
+
+ Sema &SemaRef;
+ Decl *Ctx;
+
+ /// Stack of potentially nested 'if (@available(...))'s.
+ SmallVector<VersionTuple, 8> AvailabilityStack;
+
+ void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range);
+
+public:
+ DiagnoseUnguardedAvailability(Sema &SemaRef, Decl *Ctx)
+ : SemaRef(SemaRef), Ctx(Ctx) {
+ AvailabilityStack.push_back(
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion());
+ }
+
+ void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }
+
+ bool TraverseIfStmt(IfStmt *If);
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
+ if (ObjCMethodDecl *D = Msg->getMethodDecl())
+ DiagnoseDeclAvailability(
+ D, SourceRange(Msg->getSelectorStartLoc(), Msg->getLocEnd()));
+ return true;
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ DiagnoseDeclAvailability(DRE->getDecl(),
+ SourceRange(DRE->getLocStart(), DRE->getLocEnd()));
+ return true;
+ }
+
+ bool VisitMemberExpr(MemberExpr *ME) {
+ DiagnoseDeclAvailability(ME->getMemberDecl(),
+ SourceRange(ME->getLocStart(), ME->getLocEnd()));
+ return true;
+ }
+
+ bool VisitTypeLoc(TypeLoc Ty);
+};
+
+void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
+ NamedDecl *D, SourceRange Range) {
+
+ VersionTuple ContextVersion = AvailabilityStack.back();
+ if (AvailabilityResult Result =
+ SemaRef.ShouldDiagnoseAvailabilityOfDecl(D, nullptr)) {
+ // All other diagnostic kinds have already been handled in
+ // DiagnoseAvailabilityOfDecl.
+ if (Result != AR_NotYetIntroduced)
+ return;
+
+ const AvailabilityAttr *AA = getAttrForPlatform(SemaRef.getASTContext(), D);
+ VersionTuple Introduced = AA->getIntroduced();
+
+ if (ContextVersion >= Introduced)
+ return;
+
+ // If the context of this function is less available than D, we should not
+ // emit a diagnostic.
+ if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx))
+ return;
+
+ SemaRef.Diag(Range.getBegin(), diag::warn_unguarded_availability)
+ << Range << D
+ << AvailabilityAttr::getPrettyPlatformName(
+ SemaRef.getASTContext().getTargetInfo().getPlatformName())
+ << Introduced.getAsString();
+
+ SemaRef.Diag(D->getLocation(), diag::note_availability_specified_here)
+ << D << /* partial */ 3;
+
+ // FIXME: Replace this with a fixit diagnostic.
+ SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
+ << Range << D;
+ }
+}
+
+bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
+ const Type *TyPtr = Ty.getTypePtr();
+ SourceRange Range{Ty.getBeginLoc(), Ty.getEndLoc()};
+
+ if (const TagType *TT = dyn_cast<TagType>(TyPtr)) {
+ TagDecl *TD = TT->getDecl();
+ DiagnoseDeclAvailability(TD, Range);
+
+ } else if (const TypedefType *TD = dyn_cast<TypedefType>(TyPtr)) {
+ TypedefNameDecl *D = TD->getDecl();
+ DiagnoseDeclAvailability(D, Range);
+
+ } else if (const auto *ObjCO = dyn_cast<ObjCObjectType>(TyPtr)) {
+ if (NamedDecl *D = ObjCO->getInterface())
+ DiagnoseDeclAvailability(D, Range);
+ }
+
+ return true;
+}
+
+bool DiagnoseUnguardedAvailability::TraverseIfStmt(IfStmt *If) {
+ VersionTuple CondVersion;
+ if (auto *E = dyn_cast<ObjCAvailabilityCheckExpr>(If->getCond())) {
+ CondVersion = E->getVersion();
+
+ // If we're using the '*' case here or if this check is redundant, then we
+ // use the enclosing version to check both branches.
+ if (CondVersion.empty() || CondVersion <= AvailabilityStack.back())
+ return Base::TraverseStmt(If->getThen()) &&
+ Base::TraverseStmt(If->getElse());
+ } else {
+ // This isn't an availability checking 'if', we can just continue.
+ return Base::TraverseIfStmt(If);
+ }
+
+ AvailabilityStack.push_back(CondVersion);
+ bool ShouldContinue = TraverseStmt(If->getThen());
+ AvailabilityStack.pop_back();
+
+ return ShouldContinue && TraverseStmt(If->getElse());
+}
+
+} // end anonymous namespace
+
+void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
+ Stmt *Body = nullptr;
+
+ if (auto *FD = D->getAsFunction()) {
+ // FIXME: We only examine the pattern decl for availability violations now,
+ // but we should also examine instantiated templates.
+ if (FD->isTemplateInstantiation())
+ return;
+
+ Body = FD->getBody();
+ } else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
+ Body = MD->getBody();
+
+ assert(Body && "Need a body here!");
+
+ DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
+}
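// Objective-C sketch of the traversal above (the @available form is what
// ObjCAvailabilityCheckExpr models; the example is illustrative):
//
//   if (@available(macOS 10.12, *)) {
//     use_new_api();   // guarded: no -Wunguarded-availability warning
//   } else {
//     use_new_api();   // unguarded: warn_unguarded_availability
//   }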
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index e161c87f1739..084bd4c45eda 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
@@ -36,9 +35,11 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include <map>
#include <set>
@@ -394,7 +395,8 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
++argIdx) {
ParmVarDecl *Param = cast<ParmVarDecl>(chunk.Fun.Params[argIdx].Param);
if (Param->hasUnparsedDefaultArg()) {
- CachedTokens *Toks = chunk.Fun.Params[argIdx].DefaultArgTokens;
+ std::unique_ptr<CachedTokens> Toks =
+ std::move(chunk.Fun.Params[argIdx].DefaultArgTokens);
SourceRange SR;
if (Toks->size() > 1)
SR = SourceRange((*Toks)[1].getLocation(),
@@ -403,8 +405,6 @@ void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
SR = UnparsedDefaultArgLocs[Param];
Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
<< SR;
- delete Toks;
- chunk.Fun.Params[argIdx].DefaultArgTokens = nullptr;
} else if (Param->getDefaultArg()) {
Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
<< Param->getDefaultArg()->getSourceRange();
@@ -658,12 +658,773 @@ bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
Invalid = true;
}
- if (CheckEquivalentExceptionSpec(Old, New))
- Invalid = true;
-
return Invalid;
}
+NamedDecl *
+Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
+ MultiTemplateParamsArg TemplateParamLists) {
+ assert(D.isDecompositionDeclarator());
+ const DecompositionDeclarator &Decomp = D.getDecompositionDeclarator();
+
+ // The syntax only allows a decomposition declarator as a simple-declaration
+ // or a for-range-declaration, but we parse it in more cases than that.
+ if (!D.mayHaveDecompositionDeclarator()) {
+ Diag(Decomp.getLSquareLoc(), diag::err_decomp_decl_context)
+ << Decomp.getSourceRange();
+ return nullptr;
+ }
+
+ if (!TemplateParamLists.empty()) {
+ // FIXME: There's no rule against this, but there are also no rules that
+ // would actually make it usable, so we reject it for now.
+ Diag(TemplateParamLists.front()->getTemplateLoc(),
+ diag::err_decomp_decl_template);
+ return nullptr;
+ }
+
+ Diag(Decomp.getLSquareLoc(), getLangOpts().CPlusPlus1z
+ ? diag::warn_cxx14_compat_decomp_decl
+ : diag::ext_decomp_decl)
+ << Decomp.getSourceRange();
+
+ // The semantic context is always just the current context.
+ DeclContext *const DC = CurContext;
+
+ // C++1z [dcl.dcl]/8:
+ // The decl-specifier-seq shall contain only the type-specifier auto
+ // and cv-qualifiers.
+ auto &DS = D.getDeclSpec();
+ {
+ SmallVector<StringRef, 8> BadSpecifiers;
+ SmallVector<SourceLocation, 8> BadSpecifierLocs;
+ if (auto SCS = DS.getStorageClassSpec()) {
+ BadSpecifiers.push_back(DeclSpec::getSpecifierName(SCS));
+ BadSpecifierLocs.push_back(DS.getStorageClassSpecLoc());
+ }
+ if (auto TSCS = DS.getThreadStorageClassSpec()) {
+ BadSpecifiers.push_back(DeclSpec::getSpecifierName(TSCS));
+ BadSpecifierLocs.push_back(DS.getThreadStorageClassSpecLoc());
+ }
+ if (DS.isConstexprSpecified()) {
+ BadSpecifiers.push_back("constexpr");
+ BadSpecifierLocs.push_back(DS.getConstexprSpecLoc());
+ }
+ if (DS.isInlineSpecified()) {
+ BadSpecifiers.push_back("inline");
+ BadSpecifierLocs.push_back(DS.getInlineSpecLoc());
+ }
+ if (!BadSpecifiers.empty()) {
+ auto &&Err = Diag(BadSpecifierLocs.front(), diag::err_decomp_decl_spec);
+ Err << (int)BadSpecifiers.size()
+ << llvm::join(BadSpecifiers.begin(), BadSpecifiers.end(), " ");
+ // Don't add FixItHints to remove the specifiers; we do still respect
+ // them when building the underlying variable.
+ for (auto Loc : BadSpecifierLocs)
+ Err << SourceRange(Loc, Loc);
+ }
+ // We can't recover from it being declared as a typedef.
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef)
+ return nullptr;
+ }
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType R = TInfo->getType();
+
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DeclarationType))
+ D.setInvalidType();
+
+ // The syntax only allows a single ref-qualifier prior to the decomposition
+ // declarator. No other declarator chunks are permitted. Also check the type
+ // specifier here.
+ if (DS.getTypeSpecType() != DeclSpec::TST_auto ||
+ D.hasGroupingParens() || D.getNumTypeObjects() > 1 ||
+ (D.getNumTypeObjects() == 1 &&
+ D.getTypeObject(0).Kind != DeclaratorChunk::Reference)) {
+ Diag(Decomp.getLSquareLoc(),
+ (D.hasGroupingParens() ||
+ (D.getNumTypeObjects() &&
+ D.getTypeObject(0).Kind == DeclaratorChunk::Paren))
+ ? diag::err_decomp_decl_parens
+ : diag::err_decomp_decl_type)
+ << R;
+
+ // In most cases, there's no actual problem with an explicitly-specified
+ // type, but a function type won't work here, and ActOnVariableDeclarator
+ // shouldn't be called for such a type.
+ if (R->isFunctionType())
+ D.setInvalidType();
+ }
+
+ // Build the BindingDecls.
+ SmallVector<BindingDecl*, 8> Bindings;
+
+ for (auto &B : D.getDecompositionDeclarator().bindings()) {
+ // Check for name conflicts.
+ DeclarationNameInfo NameInfo(B.Name, B.NameLoc);
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForRedeclaration);
+ LookupName(Previous, S,
+ /*CreateBuiltins*/DC->getRedeclContext()->isTranslationUnit());
+
+ // It's not permitted to shadow a template parameter name.
+ if (Previous.isSingleResult() &&
+ Previous.getFoundDecl()->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
+ Previous.getFoundDecl());
+ Previous.clear();
+ }
+
+ bool ConsiderLinkage = DC->isFunctionOrMethod() &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_extern;
+ FilterLookupForScope(Previous, DC, S, ConsiderLinkage,
+ /*AllowInlineNamespace*/false);
+ if (!Previous.empty()) {
+ auto *Old = Previous.getRepresentativeDecl();
+ Diag(B.NameLoc, diag::err_redefinition) << B.Name;
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ }
+
+ auto *BD = BindingDecl::Create(Context, DC, B.NameLoc, B.Name);
+ PushOnScopeChains(BD, S, true);
+ Bindings.push_back(BD);
+ ParsingInitForAutoVars.insert(BD);
+ }
+
+ // There are no prior lookup results for the variable itself, because it
+ // is unnamed.
+ DeclarationNameInfo NameInfo((IdentifierInfo *)nullptr,
+ Decomp.getLSquareLoc());
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName, ForRedeclaration);
+
+ // Build the variable that holds the non-decomposed object.
+ bool AddToScope = true;
+ NamedDecl *New =
+ ActOnVariableDeclarator(S, D, DC, TInfo, Previous,
+ MultiTemplateParamsArg(), AddToScope, Bindings);
+ CurContext->addHiddenDecl(New);
+
+ if (isInOpenMPDeclareTargetContext())
+ checkDeclIsAllowedInOpenMPTarget(nullptr, New);
+
+ return New;
+}
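// What this builds, in C++1z terms (sketch): one unnamed holding variable
// plus one BindingDecl per introduced name.
struct Point { int x, y; };
void demo() {
  Point pt{1, 2};
  auto &[px, py] = pt;        // OK: a single ref-qualifier is permitted
  // static auto [a, b] = pt; // diagnosed above via err_decomp_decl_spec
}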
+
+static bool checkSimpleDecomposition(
+ Sema &S, ArrayRef<BindingDecl *> Bindings, ValueDecl *Src,
+ QualType DecompType, const llvm::APSInt &NumElems, QualType ElemType,
+ llvm::function_ref<ExprResult(SourceLocation, Expr *, unsigned)> GetInit) {
+ if ((int64_t)Bindings.size() != NumElems) {
+ S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
+ << DecompType << (unsigned)Bindings.size() << NumElems.toString(10)
+ << (NumElems < Bindings.size());
+ return true;
+ }
+
+ unsigned I = 0;
+ for (auto *B : Bindings) {
+ SourceLocation Loc = B->getLocation();
+ ExprResult E = S.BuildDeclRefExpr(Src, DecompType, VK_LValue, Loc);
+ if (E.isInvalid())
+ return true;
+ E = GetInit(Loc, E.get(), I++);
+ if (E.isInvalid())
+ return true;
+ B->setBinding(ElemType, E.get());
+ }
+
+ return false;
+}
+
+static bool checkArrayLikeDecomposition(Sema &S,
+ ArrayRef<BindingDecl *> Bindings,
+ ValueDecl *Src, QualType DecompType,
+ const llvm::APSInt &NumElems,
+ QualType ElemType) {
+ return checkSimpleDecomposition(
+ S, Bindings, Src, DecompType, NumElems, ElemType,
+ [&](SourceLocation Loc, Expr *Base, unsigned I) -> ExprResult {
+ ExprResult E = S.ActOnIntegerConstant(Loc, I);
+ if (E.isInvalid())
+ return ExprError();
+ return S.CreateBuiltinArraySubscriptExpr(Base, Loc, E.get(), Loc);
+ });
+}
+
+static bool checkArrayDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
+ ValueDecl *Src, QualType DecompType,
+ const ConstantArrayType *CAT) {
+ return checkArrayLikeDecomposition(S, Bindings, Src, DecompType,
+ llvm::APSInt(CAT->getSize()),
+ CAT->getElementType());
+}
+
+static bool checkVectorDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
+ ValueDecl *Src, QualType DecompType,
+ const VectorType *VT) {
+ return checkArrayLikeDecomposition(
+ S, Bindings, Src, DecompType, llvm::APSInt::get(VT->getNumElements()),
+ S.Context.getQualifiedType(VT->getElementType(),
+ DecompType.getQualifiers()));
+}
+
+static bool checkComplexDecomposition(Sema &S,
+ ArrayRef<BindingDecl *> Bindings,
+ ValueDecl *Src, QualType DecompType,
+ const ComplexType *CT) {
+ return checkSimpleDecomposition(
+ S, Bindings, Src, DecompType, llvm::APSInt::get(2),
+ S.Context.getQualifiedType(CT->getElementType(),
+ DecompType.getQualifiers()),
+ [&](SourceLocation Loc, Expr *Base, unsigned I) -> ExprResult {
+ return S.CreateBuiltinUnaryOp(Loc, I ? UO_Imag : UO_Real, Base);
+ });
+}
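// Sketches for the three built-in decompositions above (the binding count
// must match the element count; the vector and _Complex forms are Clang
// extensions):
void decomp_demo() {
  int arr[2] = {1, 2};
  auto [a, b] = arr;          // checkArrayDecomposition (copies the array)

  typedef int v4 __attribute__((vector_size(16)));
  v4 v = {0, 1, 2, 3};
  auto [x0, x1, x2, x3] = v;  // checkVectorDecomposition

  _Complex double c = 1.0;
  auto [re, im] = c;          // checkComplexDecomposition: __real__/__imag__
}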
+
+static std::string printTemplateArgs(const PrintingPolicy &PrintingPolicy,
+ TemplateArgumentListInfo &Args) {
+ SmallString<128> SS;
+ llvm::raw_svector_ostream OS(SS);
+ bool First = true;
+ for (auto &Arg : Args.arguments()) {
+ if (!First)
+ OS << ", ";
+ Arg.getArgument().print(PrintingPolicy, OS);
+ First = false;
+ }
+ return OS.str();
+}
+
+static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
+ SourceLocation Loc, StringRef Trait,
+ TemplateArgumentListInfo &Args,
+ unsigned DiagID) {
+ auto DiagnoseMissing = [&] {
+ if (DiagID)
+ S.Diag(Loc, DiagID) << printTemplateArgs(S.Context.getPrintingPolicy(),
+ Args);
+ return true;
+ };
+
+ // FIXME: Factor out duplication with lookupPromiseType in SemaCoroutine.
+ NamespaceDecl *Std = S.getStdNamespace();
+ if (!Std)
+ return DiagnoseMissing();
+
+ // Look up the trait itself, within namespace std. We can diagnose various
+ // problems with this lookup even if we've been asked to not diagnose a
+ // missing specialization, because this can only fail if the user has been
+ // declaring their own names in namespace std or we don't support the
+ // standard library implementation in use.
+ LookupResult Result(S, &S.PP.getIdentifierTable().get(Trait),
+ Loc, Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std))
+ return DiagnoseMissing();
+ if (Result.isAmbiguous())
+ return true;
+
+ ClassTemplateDecl *TraitTD = Result.getAsSingle<ClassTemplateDecl>();
+ if (!TraitTD) {
+ Result.suppressDiagnostics();
+ NamedDecl *Found = *Result.begin();
+ S.Diag(Loc, diag::err_std_type_trait_not_class_template) << Trait;
+ S.Diag(Found->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ // Build the template-id.
+ QualType TraitTy = S.CheckTemplateIdType(TemplateName(TraitTD), Loc, Args);
+ if (TraitTy.isNull())
+ return true;
+ if (!S.isCompleteType(Loc, TraitTy)) {
+ if (DiagID)
+ S.RequireCompleteType(
+ Loc, TraitTy, DiagID,
+ printTemplateArgs(S.Context.getPrintingPolicy(), Args));
+ return true;
+ }
+
+ CXXRecordDecl *RD = TraitTy->getAsCXXRecordDecl();
+ assert(RD && "specialization of class template is not a class?");
+
+ // Look up the member of the trait type.
+ S.LookupQualifiedName(TraitMemberLookup, RD);
+ return TraitMemberLookup.isAmbiguous();
+}
+
+static TemplateArgumentLoc
+getTrivialIntegralTemplateArgument(Sema &S, SourceLocation Loc, QualType T,
+ uint64_t I) {
+ TemplateArgument Arg(S.Context, S.Context.MakeIntValue(I, T), T);
+ return S.getTrivialTemplateArgumentLoc(Arg, T, Loc);
+}
+
+static TemplateArgumentLoc
+getTrivialTypeTemplateArgument(Sema &S, SourceLocation Loc, QualType T) {
+ return S.getTrivialTemplateArgumentLoc(TemplateArgument(T), QualType(), Loc);
+}
+
+namespace { enum class IsTupleLike { TupleLike, NotTupleLike, Error }; }
+
+static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
+ llvm::APSInt &Size) {
+ EnterExpressionEvaluationContext ContextRAII(S, Sema::ConstantEvaluated);
+
+ DeclarationName Value = S.PP.getIdentifierInfo("value");
+ LookupResult R(S, Value, Loc, Sema::LookupOrdinaryName);
+
+ // Form template argument list for tuple_size<T>.
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(getTrivialTypeTemplateArgument(S, Loc, T));
+
+ // If there's no tuple_size specialization, it's not tuple-like.
+ if (lookupStdTypeTraitMember(S, R, Loc, "tuple_size", Args, /*DiagID*/0))
+ return IsTupleLike::NotTupleLike;
+
+ // If we get this far, we've committed to the tuple interpretation, but
+ // we can still fail if there actually isn't a usable ::value.
+
+ struct ICEDiagnoser : Sema::VerifyICEDiagnoser {
+ LookupResult &R;
+ TemplateArgumentListInfo &Args;
+ ICEDiagnoser(LookupResult &R, TemplateArgumentListInfo &Args)
+ : R(R), Args(Args) {}
+ void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) {
+ S.Diag(Loc, diag::err_decomp_decl_std_tuple_size_not_constant)
+ << printTemplateArgs(S.Context.getPrintingPolicy(), Args);
+ }
+ } Diagnoser(R, Args);
+
+ if (R.empty()) {
+ Diagnoser.diagnoseNotICE(S, Loc, SourceRange());
+ return IsTupleLike::Error;
+ }
+
+ ExprResult E =
+ S.BuildDeclarationNameExpr(CXXScopeSpec(), R, /*NeedsADL*/false);
+ if (E.isInvalid())
+ return IsTupleLike::Error;
+
+ E = S.VerifyIntegerConstantExpression(E.get(), &Size, Diagnoser, false);
+ if (E.isInvalid())
+ return IsTupleLike::Error;
+
+ return IsTupleLike::TupleLike;
+}
+
+/// \return std::tuple_element<I, T>::type.
+static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
+ unsigned I, QualType T) {
+ // Form template argument list for tuple_element<I, T>.
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(
+ getTrivialIntegralTemplateArgument(S, Loc, S.Context.getSizeType(), I));
+ Args.addArgument(getTrivialTypeTemplateArgument(S, Loc, T));
+
+ DeclarationName TypeDN = S.PP.getIdentifierInfo("type");
+ LookupResult R(S, TypeDN, Loc, Sema::LookupOrdinaryName);
+ if (lookupStdTypeTraitMember(
+ S, R, Loc, "tuple_element", Args,
+ diag::err_decomp_decl_std_tuple_element_not_specialized))
+ return QualType();
+
+ auto *TD = R.getAsSingle<TypeDecl>();
+ if (!TD) {
+ R.suppressDiagnostics();
+ S.Diag(Loc, diag::err_decomp_decl_std_tuple_element_not_specialized)
+ << printTemplateArgs(S.Context.getPrintingPolicy(), Args);
+ if (!R.empty())
+ S.Diag(R.getRepresentativeDecl()->getLocation(), diag::note_declared_at);
+ return QualType();
+ }
+
+ return S.Context.getTypeDeclType(TD);
+}
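// The tuple-like protocol probed above, seen from user code (a sketch; the
// std specializations and an ADL-visible get are the customization points):
#include <tuple>
struct Pair { int a; double b; };
namespace std {
  template<> struct tuple_size<Pair> : integral_constant<size_t, 2> {};
  template<> struct tuple_element<0, Pair> { using type = int; };
  template<> struct tuple_element<1, Pair> { using type = double; };
}
template<std::size_t I> auto get(const Pair &p) {
  if constexpr (I == 0) return p.a;
  else return p.b;
}
void demo_tuple() {
  auto [i, d] = Pair{1, 2.5};  // tuple_size, then get<0>/get<1>, tuple_element
}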
+
+namespace {
+struct BindingDiagnosticTrap {
+ Sema &S;
+ DiagnosticErrorTrap Trap;
+ BindingDecl *BD;
+
+ BindingDiagnosticTrap(Sema &S, BindingDecl *BD)
+ : S(S), Trap(S.Diags), BD(BD) {}
+ ~BindingDiagnosticTrap() {
+ if (Trap.hasErrorOccurred())
+ S.Diag(BD->getLocation(), diag::note_in_binding_decl_init) << BD;
+ }
+};
+}
+
+static bool checkTupleLikeDecomposition(Sema &S,
+ ArrayRef<BindingDecl *> Bindings,
+ VarDecl *Src, QualType DecompType,
+ const llvm::APSInt &TupleSize) {
+ if ((int64_t)Bindings.size() != TupleSize) {
+ S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
+ << DecompType << (unsigned)Bindings.size() << TupleSize.toString(10)
+ << (TupleSize < Bindings.size());
+ return true;
+ }
+
+ if (Bindings.empty())
+ return false;
+
+ DeclarationName GetDN = S.PP.getIdentifierInfo("get");
+
+ // [dcl.decomp]p3:
+ // The unqualified-id get is looked up in the scope of E by class member
+ // access lookup
+ LookupResult MemberGet(S, GetDN, Src->getLocation(), Sema::LookupMemberName);
+ bool UseMemberGet = false;
+ if (S.isCompleteType(Src->getLocation(), DecompType)) {
+ if (auto *RD = DecompType->getAsCXXRecordDecl())
+ S.LookupQualifiedName(MemberGet, RD);
+ if (MemberGet.isAmbiguous())
+ return true;
+ UseMemberGet = !MemberGet.empty();
+ S.FilterAcceptableTemplateNames(MemberGet);
+ }
+
+ unsigned I = 0;
+ for (auto *B : Bindings) {
+ BindingDiagnosticTrap Trap(S, B);
+ SourceLocation Loc = B->getLocation();
+
+ ExprResult E = S.BuildDeclRefExpr(Src, DecompType, VK_LValue, Loc);
+ if (E.isInvalid())
+ return true;
+
+ // e is an lvalue if the type of the entity is an lvalue reference and
+ // an xvalue otherwise
+ if (!Src->getType()->isLValueReferenceType())
+ E = ImplicitCastExpr::Create(S.Context, E.get()->getType(), CK_NoOp,
+ E.get(), nullptr, VK_XValue);
+
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(
+ getTrivialIntegralTemplateArgument(S, Loc, S.Context.getSizeType(), I));
+
+ if (UseMemberGet) {
+ // if [lookup of member get] finds at least one declaration, the
+ // initializer is e.get<i-1>().
+ E = S.BuildMemberReferenceExpr(E.get(), DecompType, Loc, false,
+ CXXScopeSpec(), SourceLocation(), nullptr,
+ MemberGet, &Args, nullptr);
+ if (E.isInvalid())
+ return true;
+
+ E = S.ActOnCallExpr(nullptr, E.get(), Loc, None, Loc);
+ } else {
+ // Otherwise, the initializer is get<i-1>(e), where get is looked up
+ // in the associated namespaces.
+ Expr *Get = UnresolvedLookupExpr::Create(
+ S.Context, nullptr, NestedNameSpecifierLoc(), SourceLocation(),
+ DeclarationNameInfo(GetDN, Loc), /*RequiresADL*/true, &Args,
+ UnresolvedSetIterator(), UnresolvedSetIterator());
+
+ Expr *Arg = E.get();
+ E = S.ActOnCallExpr(nullptr, Get, Loc, Arg, Loc);
+ }
+ if (E.isInvalid())
+ return true;
+ Expr *Init = E.get();
+
+ // Given the type T designated by std::tuple_element<i - 1, E>::type,
+ QualType T = getTupleLikeElementType(S, Loc, I, DecompType);
+ if (T.isNull())
+ return true;
+
+ // each vi is a variable of type "reference to T" initialized with the
+ // initializer, where the reference is an lvalue reference if the
+ // initializer is an lvalue and an rvalue reference otherwise
+ QualType RefType =
+ S.BuildReferenceType(T, E.get()->isLValue(), Loc, B->getDeclName());
+ if (RefType.isNull())
+ return true;
+ auto *RefVD = VarDecl::Create(
+ S.Context, Src->getDeclContext(), Loc, Loc,
+ B->getDeclName().getAsIdentifierInfo(), RefType,
+ S.Context.getTrivialTypeSourceInfo(T, Loc), Src->getStorageClass());
+ RefVD->setLexicalDeclContext(Src->getLexicalDeclContext());
+ RefVD->setTSCSpec(Src->getTSCSpec());
+ RefVD->setImplicit();
+ if (Src->isInlineSpecified())
+ RefVD->setInlineSpecified();
+ RefVD->getLexicalDeclContext()->addHiddenDecl(RefVD);
+
+ InitializedEntity Entity = InitializedEntity::InitializeBinding(RefVD);
+ InitializationKind Kind = InitializationKind::CreateCopy(Loc, Loc);
+ InitializationSequence Seq(S, Entity, Kind, Init);
+ E = Seq.Perform(S, Entity, Kind, Init);
+ if (E.isInvalid())
+ return true;
+ E = S.ActOnFinishFullExpr(E.get(), Loc);
+ if (E.isInvalid())
+ return true;
+ RefVD->setInit(E.get());
+ RefVD->checkInitIsICE();
+
+ E = S.BuildDeclarationNameExpr(CXXScopeSpec(),
+ DeclarationNameInfo(B->getDeclName(), Loc),
+ RefVD);
+ if (E.isInvalid())
+ return true;
+
+ B->setBinding(T, E.get());
+ I++;
+ }
+
+ return false;
+}
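+
+// User-level sketch of the two initializer forms built above (hypothetical
+// types, assuming suitable tuple_size/tuple_element specializations exist):
+//
+//   struct M { template <std::size_t I> int get() const; };  // e.get<I>()
+//   struct F { };
+//   template <std::size_t I> int get(const F &);  // get<I>(e), found by ADL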
+
+/// Find the base class to decompose in a built-in decomposition of a class type.
+/// This base class search is, unfortunately, not quite like any other that we
+/// perform anywhere else in C++.
+static const CXXRecordDecl *findDecomposableBaseClass(Sema &S,
+ SourceLocation Loc,
+ const CXXRecordDecl *RD,
+ CXXCastPath &BasePath) {
+ auto BaseHasFields = [](const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path) {
+ return Specifier->getType()->getAsCXXRecordDecl()->hasDirectFields();
+ };
+
+ const CXXRecordDecl *ClassWithFields = nullptr;
+ if (RD->hasDirectFields())
+ // [dcl.decomp]p4:
+ // Otherwise, all of E's non-static data members shall be public direct
+ // members of E ...
+ ClassWithFields = RD;
+ else {
+ // ... or of ...
+ CXXBasePaths Paths;
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(RD));
+ if (!RD->lookupInBases(BaseHasFields, Paths)) {
+ // If no classes have fields, just decompose RD itself. (This will work
+ // if and only if zero bindings were provided.)
+ return RD;
+ }
+
+ CXXBasePath *BestPath = nullptr;
+ for (auto &P : Paths) {
+ if (!BestPath)
+ BestPath = &P;
+ else if (!S.Context.hasSameType(P.back().Base->getType(),
+ BestPath->back().Base->getType())) {
+ // ... the same ...
+ S.Diag(Loc, diag::err_decomp_decl_multiple_bases_with_members)
+ << false << RD << BestPath->back().Base->getType()
+ << P.back().Base->getType();
+ return nullptr;
+ } else if (P.Access < BestPath->Access) {
+ BestPath = &P;
+ }
+ }
+
+ // ... unambiguous ...
+ QualType BaseType = BestPath->back().Base->getType();
+ if (Paths.isAmbiguous(S.Context.getCanonicalType(BaseType))) {
+ S.Diag(Loc, diag::err_decomp_decl_ambiguous_base)
+ << RD << BaseType << S.getAmbiguousPathsDisplayString(Paths);
+ return nullptr;
+ }
+
+ // ... public base class of E.
+ if (BestPath->Access != AS_public) {
+ S.Diag(Loc, diag::err_decomp_decl_non_public_base)
+ << RD << BaseType;
+ for (auto &BS : *BestPath) {
+ if (BS.Base->getAccessSpecifier() != AS_public) {
+ S.Diag(BS.Base->getLocStart(), diag::note_access_constrained_by_path)
+ << (BS.Base->getAccessSpecifier() == AS_protected)
+ << (BS.Base->getAccessSpecifierAsWritten() == AS_none);
+ break;
+ }
+ }
+ return nullptr;
+ }
+
+ ClassWithFields = BaseType->getAsCXXRecordDecl();
+ S.BuildBasePathArray(Paths, BasePath);
+ }
+
+ // The above search did not check whether the selected class itself has base
+ // classes with fields, so check that now.
+ CXXBasePaths Paths;
+ if (ClassWithFields->lookupInBases(BaseHasFields, Paths)) {
+ S.Diag(Loc, diag::err_decomp_decl_multiple_bases_with_members)
+ << (ClassWithFields == RD) << RD << ClassWithFields
+ << Paths.front().back().Base->getType();
+ return nullptr;
+ }
+
+ return ClassWithFields;
+}
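+
+// Sketch of the search above (hypothetical types):
+//
+//   struct A { int x, y; };
+//   struct B : A { };        // decomposing B selects A as ClassWithFields
+//   struct C : A { int z; }; // fields in both C and a base: diagnosed above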
+
+static bool checkMemberDecomposition(Sema &S, ArrayRef<BindingDecl*> Bindings,
+ ValueDecl *Src, QualType DecompType,
+ const CXXRecordDecl *RD) {
+ CXXCastPath BasePath;
+ RD = findDecomposableBaseClass(S, Src->getLocation(), RD, BasePath);
+ if (!RD)
+ return true;
+ QualType BaseType = S.Context.getQualifiedType(S.Context.getRecordType(RD),
+ DecompType.getQualifiers());
+
+ auto DiagnoseBadNumberOfBindings = [&]() -> bool {
+ unsigned NumFields =
+ std::count_if(RD->field_begin(), RD->field_end(),
+ [](FieldDecl *FD) { return !FD->isUnnamedBitfield(); });
+ assert(Bindings.size() != NumFields);
+ S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
+ << DecompType << (unsigned)Bindings.size() << NumFields
+ << (NumFields < Bindings.size());
+ return true;
+ };
+
+ // all of E's non-static data members shall be public [...] members,
+ // E shall not have an anonymous union member, ...
+ unsigned I = 0;
+ for (auto *FD : RD->fields()) {
+ if (FD->isUnnamedBitfield())
+ continue;
+
+ if (FD->isAnonymousStructOrUnion()) {
+ S.Diag(Src->getLocation(), diag::err_decomp_decl_anon_union_member)
+ << DecompType << FD->getType()->isUnionType();
+ S.Diag(FD->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ // We have a real field to bind.
+ if (I >= Bindings.size())
+ return DiagnoseBadNumberOfBindings();
+ auto *B = Bindings[I++];
+
+ SourceLocation Loc = B->getLocation();
+ if (FD->getAccess() != AS_public) {
+ S.Diag(Loc, diag::err_decomp_decl_non_public_member) << FD << DecompType;
+
+ // Determine whether the access specifier was explicit.
+ bool Implicit = true;
+ for (const auto *D : RD->decls()) {
+ if (declaresSameEntity(D, FD))
+ break;
+ if (isa<AccessSpecDecl>(D)) {
+ Implicit = false;
+ break;
+ }
+ }
+
+ S.Diag(FD->getLocation(), diag::note_access_natural)
+ << (FD->getAccess() == AS_protected) << Implicit;
+ return true;
+ }
+
+ // Initialize the binding to Src.FD.
+ ExprResult E = S.BuildDeclRefExpr(Src, DecompType, VK_LValue, Loc);
+ if (E.isInvalid())
+ return true;
+ E = S.ImpCastExprToType(E.get(), BaseType, CK_UncheckedDerivedToBase,
+ VK_LValue, &BasePath);
+ if (E.isInvalid())
+ return true;
+ E = S.BuildFieldReferenceExpr(E.get(), /*IsArrow*/ false, Loc,
+ CXXScopeSpec(), FD,
+ DeclAccessPair::make(FD, FD->getAccess()),
+ DeclarationNameInfo(FD->getDeclName(), Loc));
+ if (E.isInvalid())
+ return true;
+
+ // If the type of the member is T, the referenced type is cv T, where cv is
+ // the cv-qualification of the decomposition expression.
+ //
+ // FIXME: We resolve a defect here: if the field is mutable, we do not add
+ // 'const' to the type of the field.
+ Qualifiers Q = DecompType.getQualifiers();
+ if (FD->isMutable())
+ Q.removeConst();
+ B->setBinding(S.BuildQualifiedType(FD->getType(), Loc, Q), E.get());
+ }
+
+ if (I != Bindings.size())
+ return DiagnoseBadNumberOfBindings();
+
+ return false;
+}
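+
+// For example (hypothetical types): a binding to `b` in
+//   struct S { int a; private: int b; };
+// is rejected above because b is not public, and
+//   struct T { union { int u; }; };
+// is rejected because of its anonymous union member.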
+
+void Sema::CheckCompleteDecompositionDeclaration(DecompositionDecl *DD) {
+ QualType DecompType = DD->getType();
+
+ // If the type of the decomposition is dependent, then so is the type of
+ // each binding.
+ if (DecompType->isDependentType()) {
+ for (auto *B : DD->bindings())
+ B->setType(Context.DependentTy);
+ return;
+ }
+
+ DecompType = DecompType.getNonReferenceType();
+ ArrayRef<BindingDecl*> Bindings = DD->bindings();
+
+ // C++1z [dcl.decomp]/2:
+ // If E is an array type [...]
+ // As an extension, we also support decomposition of built-in complex and
+ // vector types.
+ if (auto *CAT = Context.getAsConstantArrayType(DecompType)) {
+ if (checkArrayDecomposition(*this, Bindings, DD, DecompType, CAT))
+ DD->setInvalidDecl();
+ return;
+ }
+ if (auto *VT = DecompType->getAs<VectorType>()) {
+ if (checkVectorDecomposition(*this, Bindings, DD, DecompType, VT))
+ DD->setInvalidDecl();
+ return;
+ }
+ if (auto *CT = DecompType->getAs<ComplexType>()) {
+ if (checkComplexDecomposition(*this, Bindings, DD, DecompType, CT))
+ DD->setInvalidDecl();
+ return;
+ }
+
+ // C++1z [dcl.decomp]/3:
+ // if the expression std::tuple_size<E>::value is a well-formed integral
+ // constant expression, [...]
+ llvm::APSInt TupleSize(32);
+ switch (isTupleLike(*this, DD->getLocation(), DecompType, TupleSize)) {
+ case IsTupleLike::Error:
+ DD->setInvalidDecl();
+ return;
+
+ case IsTupleLike::TupleLike:
+ if (checkTupleLikeDecomposition(*this, Bindings, DD, DecompType, TupleSize))
+ DD->setInvalidDecl();
+ return;
+
+ case IsTupleLike::NotTupleLike:
+ break;
+ }
+
+ // C++1z [dcl.dcl]/8:
+ // [E shall be of array or non-union class type]
+ CXXRecordDecl *RD = DecompType->getAsCXXRecordDecl();
+ if (!RD || RD->isUnion()) {
+ Diag(DD->getLocation(), diag::err_decomp_decl_unbindable_type)
+ << DD << !RD << DecompType;
+ DD->setInvalidDecl();
+ return;
+ }
+
+ // C++1z [dcl.decomp]/4:
+ // all of E's non-static data members shall be [...] direct members of
+ // E or of the same unambiguous public base class of E, ...
+ if (checkMemberDecomposition(*this, Bindings, DD, DecompType, RD))
+ DD->setInvalidDecl();
+}
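+
+// For example (hypothetical snippets), each hitting a different branch above:
+//   int arr[2];              auto [a, b] = arr;  // constant array case
+//   std::tuple<int, int> t;  auto [c, d] = t;    // tuple-like case
+//   struct P { int x, y; };  auto [x, y] = P();  // member case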
+
/// \brief Merge the exception specifications of two variable declarations.
///
/// This is called when there's a redeclaration of a VarDecl. The function
@@ -912,7 +1673,8 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
// C++11 and permitted in C++1y, so ignore them.
continue;
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
// C++1y [dcl.constexpr]p3 allows anything except:
// a definition of a variable of non-literal type or of static or
// thread storage duration or for which no initialization is performed.
@@ -2192,7 +2954,8 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
} else {
Member = HandleField(S, cast<CXXRecordDecl>(CurContext), Loc, D,
BitWidth, InitStyle, AS);
- assert(Member && "HandleField never returns null");
+ if (!Member)
+ return nullptr;
}
} else {
Member = HandleDeclarator(S, D, TemplateParameterLists);
@@ -3483,98 +4246,30 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
CtorArg = CastForMoving(SemaRef, CtorArg.get());
}
- // When the field we are copying is an array, create index variables for
- // each dimension of the array. We use these index variables to subscript
- // the source array, and other clients (e.g., CodeGen) will perform the
- // necessary iteration with these index variables.
- SmallVector<VarDecl *, 4> IndexVariables;
- QualType BaseType = Field->getType();
- QualType SizeType = SemaRef.Context.getSizeType();
- bool InitializingArray = false;
- while (const ConstantArrayType *Array
- = SemaRef.Context.getAsConstantArrayType(BaseType)) {
- InitializingArray = true;
- // Create the iteration variable for this array index.
- IdentifierInfo *IterationVarName = nullptr;
- {
- SmallString<8> Str;
- llvm::raw_svector_ostream OS(Str);
- OS << "__i" << IndexVariables.size();
- IterationVarName = &SemaRef.Context.Idents.get(OS.str());
- }
- VarDecl *IterationVar
- = VarDecl::Create(SemaRef.Context, SemaRef.CurContext, Loc, Loc,
- IterationVarName, SizeType,
- SemaRef.Context.getTrivialTypeSourceInfo(SizeType, Loc),
- SC_None);
- IndexVariables.push_back(IterationVar);
-
- // Create a reference to the iteration variable.
- ExprResult IterationVarRef
- = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
- assert(!IterationVarRef.isInvalid() &&
- "Reference to invented variable cannot fail!");
- IterationVarRef = SemaRef.DefaultLvalueConversion(IterationVarRef.get());
- assert(!IterationVarRef.isInvalid() &&
- "Conversion of invented variable cannot fail!");
-
- // Subscript the array with this iteration variable.
- CtorArg = SemaRef.CreateBuiltinArraySubscriptExpr(CtorArg.get(), Loc,
- IterationVarRef.get(),
- Loc);
- if (CtorArg.isInvalid())
- return true;
+ InitializedEntity Entity =
+ Indirect ? InitializedEntity::InitializeMember(Indirect, nullptr,
+ /*Implicit*/ true)
+ : InitializedEntity::InitializeMember(Field, nullptr,
+ /*Implicit*/ true);
- BaseType = Array->getElementType();
- }
-
- // The array subscript expression is an lvalue, which is wrong for moving.
- if (Moving && InitializingArray)
- CtorArg = CastForMoving(SemaRef, CtorArg.get());
-
- // Construct the entity that we will be initializing. For an array, this
- // will be first element in the array, which may require several levels
- // of array-subscript entities.
- SmallVector<InitializedEntity, 4> Entities;
- Entities.reserve(1 + IndexVariables.size());
- if (Indirect)
- Entities.push_back(InitializedEntity::InitializeMember(Indirect));
- else
- Entities.push_back(InitializedEntity::InitializeMember(Field));
- for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
- Entities.push_back(InitializedEntity::InitializeElement(SemaRef.Context,
- 0,
- Entities.back()));
-
// Direct-initialize to use the copy constructor.
InitializationKind InitKind =
InitializationKind::CreateDirect(Loc, SourceLocation(), SourceLocation());
Expr *CtorArgE = CtorArg.getAs<Expr>();
- InitializationSequence InitSeq(SemaRef, Entities.back(), InitKind,
- CtorArgE);
-
- ExprResult MemberInit
- = InitSeq.Perform(SemaRef, Entities.back(), InitKind,
- MultiExprArg(&CtorArgE, 1));
+ InitializationSequence InitSeq(SemaRef, Entity, InitKind, CtorArgE);
+ ExprResult MemberInit =
+ InitSeq.Perform(SemaRef, Entity, InitKind, MultiExprArg(&CtorArgE, 1));
MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
if (MemberInit.isInvalid())
return true;
- if (Indirect) {
- assert(IndexVariables.size() == 0 &&
- "Indirect field improperly initialized");
- CXXMemberInit
- = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Indirect,
- Loc, Loc,
- MemberInit.getAs<Expr>(),
- Loc);
- } else
- CXXMemberInit = CXXCtorInitializer::Create(SemaRef.Context, Field, Loc,
- Loc, MemberInit.getAs<Expr>(),
- Loc,
- IndexVariables.data(),
- IndexVariables.size());
+ if (Indirect)
+ CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(
+ SemaRef.Context, Indirect, Loc, Loc, MemberInit.getAs<Expr>(), Loc);
+ else
+ CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(
+ SemaRef.Context, Field, Loc, Loc, MemberInit.getAs<Expr>(), Loc);
return false;
}
@@ -3585,9 +4280,11 @@ BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
SemaRef.Context.getBaseElementType(Field->getType());
if (FieldBaseElementType->isRecordType()) {
- InitializedEntity InitEntity
- = Indirect? InitializedEntity::InitializeMember(Indirect)
- : InitializedEntity::InitializeMember(Field);
+ InitializedEntity InitEntity =
+ Indirect ? InitializedEntity::InitializeMember(Indirect, nullptr,
+ /*Implicit*/ true)
+ : InitializedEntity::InitializeMember(Field, nullptr,
+ /*Implicit*/ true);
InitializationKind InitKind =
InitializationKind::CreateDefault(Loc);
@@ -4778,7 +5475,8 @@ void Sema::checkClassLevelDLLAttribute(CXXRecordDecl *Class) {
if (MD->isInlined()) {
// MinGW does not import or export inline methods.
- if (!Context.getTargetInfo().getCXXABI().isMicrosoft())
+ if (!Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ !Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())
continue;
// MSVC versions before 2015 don't export the move assignment operators
@@ -5333,7 +6031,8 @@ void Sema::EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD)
return;
// Evaluate the exception specification.
- auto ESI = computeImplicitExceptionSpec(*this, Loc, MD).getExceptionSpec();
+ auto IES = computeImplicitExceptionSpec(*this, Loc, MD);
+ auto ESI = IES.getExceptionSpec();
// Update the type of the special member to use it.
UpdateExceptionSpec(MD, ESI);
@@ -5531,8 +6230,8 @@ void Sema::CheckExplicitlyDefaultedMemberExceptionSpec(
CallingConv CC = Context.getDefaultCallingConvention(/*IsVariadic=*/false,
/*IsCXXMethod=*/true);
FunctionProtoType::ExtProtoInfo EPI(CC);
- EPI.ExceptionSpec = computeImplicitExceptionSpec(*this, MD->getLocation(), MD)
- .getExceptionSpec();
+ auto IES = computeImplicitExceptionSpec(*this, MD->getLocation(), MD);
+ EPI.ExceptionSpec = IES.getExceptionSpec();
const FunctionProtoType *ImplicitType = cast<FunctionProtoType>(
Context.getFunctionType(Context.VoidTy, None, EPI));
@@ -5889,8 +6588,13 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
// This is a silly definition, because it gives an empty union a deleted
// default constructor. Don't do that.
- if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst &&
- !MD->getParent()->field_empty()) {
+ if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst) {
+ bool AnyFields = false;
+ for (auto *F : MD->getParent()->fields())
+ if ((AnyFields = !F->isUnnamedBitfield()))
+ break;
+ if (!AnyFields)
+ return false;
if (Diagnose)
S.Diag(MD->getParent()->getLocation(),
diag::note_deleted_default_ctor_all_const)
@@ -5939,10 +6643,15 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
(CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)) {
CXXMethodDecl *UserDeclaredMove = nullptr;
- // In Microsoft mode, a user-declared move only causes the deletion of the
- // corresponding copy operation, not both copy operations.
+ // In Microsoft mode up to MSVC 2013, a user-declared move only causes the
+ // deletion of the corresponding copy operation, not both copy operations.
+  // MSVC 2015 has adopted the standards-conforming behavior.
+ bool DeletesOnlyMatchingCopy =
+ getLangOpts().MSVCCompat &&
+ !getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2015);
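+  // For example, given `struct S { S(S&&); };`, C++11 deletes both copy
+  // operations; MSVC before 2015 deleted only the copy constructor, which
+  // is the behavior DeletesOnlyMatchingCopy models.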
+
if (RD->hasUserDeclaredMoveConstructor() &&
- (!getLangOpts().MSVCCompat || CSM == CXXCopyConstructor)) {
+ (!DeletesOnlyMatchingCopy || CSM == CXXCopyConstructor)) {
if (!Diagnose) return true;
// Find any user-declared move constructor.
@@ -5954,7 +6663,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
}
assert(UserDeclaredMove);
} else if (RD->hasUserDeclaredMoveAssignment() &&
- (!getLangOpts().MSVCCompat || CSM == CXXCopyAssignment)) {
+ (!DeletesOnlyMatchingCopy || CSM == CXXCopyAssignment)) {
if (!Diagnose) return true;
// Find any user-declared move assignment operator.
@@ -5987,7 +6696,7 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
DeclarationName Name =
Context.DeclarationNames.getCXXOperatorName(OO_Delete);
if (FindDeallocationFunction(MD->getLocation(), MD->getParent(), Name,
- OperatorDelete, false)) {
+ OperatorDelete, /*Diagnose*/false)) {
if (Diagnose)
Diag(RD->getLocation(), diag::note_deleted_dtor_no_operator_delete);
return true;
@@ -5997,13 +6706,14 @@ bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
SpecialMemberDeletionInfo SMI(*this, MD, CSM, ICI, Diagnose);
for (auto &BI : RD->bases())
- if (!BI.isVirtual() &&
+ if ((SMI.IsAssignment || !BI.isVirtual()) &&
SMI.shouldDeleteForBase(&BI))
return true;
// Per DR1611, do not consider virtual bases of constructors of abstract
- // classes, since we are not going to construct them.
- if (!RD->isAbstract() || !SMI.IsConstructor) {
+ // classes, since we are not going to construct them. For assignment
+ // operators, we only assign (and thus only consider) direct bases.
+ if ((!RD->isAbstract() || !SMI.IsConstructor) && !SMI.IsAssignment) {
for (auto &BI : RD->vbases())
if (SMI.shouldDeleteForBase(&BI))
return true;
@@ -6618,6 +7328,17 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
if (ClassDecl->needsOverloadResolutionForCopyConstructor() ||
ClassDecl->hasInheritedConstructor())
DeclareImplicitCopyConstructor(ClassDecl);
+ // For the MS ABI we need to know whether the copy ctor is deleted. A
+ // prerequisite for deleting the implicit copy ctor is that the class has a
+ // move ctor or move assignment that is either user-declared or whose
+ // semantics are inherited from a subobject. FIXME: We should provide a more
+ // direct way for CodeGen to ask whether the constructor was deleted.
+ else if (Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ (ClassDecl->hasUserDeclaredMoveConstructor() ||
+ ClassDecl->needsOverloadResolutionForMoveConstructor() ||
+ ClassDecl->hasUserDeclaredMoveAssignment() ||
+ ClassDecl->needsOverloadResolutionForMoveAssignment()))
+ DeclareImplicitCopyConstructor(ClassDecl);
}
if (getLangOpts().CPlusPlus11 && ClassDecl->needsImplicitMoveConstructor()) {
@@ -6926,19 +7647,11 @@ bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
Loc = RD->getLocation();
// If we have a virtual destructor, look up the deallocation function
- FunctionDecl *OperatorDelete = nullptr;
- DeclarationName Name =
- Context.DeclarationNames.getCXXOperatorName(OO_Delete);
- if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
- return true;
- // If there's no class-specific operator delete, look up the global
- // non-array delete.
- if (!OperatorDelete)
- OperatorDelete = FindUsualDeallocationFunction(Loc, true, Name);
-
- MarkFunctionReferenced(Loc, OperatorDelete);
-
- Destructor->setOperatorDelete(OperatorDelete);
+ if (FunctionDecl *OperatorDelete =
+ FindDeallocationFunctionForDestructor(Loc, RD)) {
+ MarkFunctionReferenced(Loc, OperatorDelete);
+ Destructor->setOperatorDelete(OperatorDelete);
+ }
}
return false;
@@ -7320,7 +8033,7 @@ static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
S.Diag(Loc, diag::warn_inline_namespace_reopened_noninline)
<< FixItHint::CreateInsertion(KeywordLoc, "inline ");
else
- S.Diag(Loc, diag::err_inline_namespace_mismatch) << *IsInline;
+ S.Diag(Loc, diag::err_inline_namespace_mismatch);
S.Diag(PrevNS->getLocation(), diag::note_previous_definition);
*IsInline = PrevNS->isInline();
@@ -7497,11 +8210,29 @@ CXXRecordDecl *Sema::getStdBadAlloc() const {
StdBadAlloc.get(Context.getExternalSource()));
}
+EnumDecl *Sema::getStdAlignValT() const {
+ return cast_or_null<EnumDecl>(StdAlignValT.get(Context.getExternalSource()));
+}
+
NamespaceDecl *Sema::getStdNamespace() const {
return cast_or_null<NamespaceDecl>(
StdNamespace.get(Context.getExternalSource()));
}
+NamespaceDecl *Sema::lookupStdExperimentalNamespace() {
+ if (!StdExperimentalNamespaceCache) {
+ if (auto Std = getStdNamespace()) {
+ LookupResult Result(*this, &PP.getIdentifierTable().get("experimental"),
+ SourceLocation(), LookupNamespaceName);
+ if (!LookupQualifiedName(Result, Std) ||
+ !(StdExperimentalNamespaceCache =
+ Result.getAsSingle<NamespaceDecl>()))
+ Result.suppressDiagnostics();
+ }
+ }
+ return StdExperimentalNamespaceCache;
+}
+
/// \brief Retrieve the special "std" namespace, which may require us to
/// implicitly define the namespace.
NamespaceDecl *Sema::getOrCreateStdNamespace() {
@@ -7801,15 +8532,19 @@ void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) {
Decl *Sema::ActOnUsingDeclaration(Scope *S,
AccessSpecifier AS,
- bool HasUsingKeyword,
SourceLocation UsingLoc,
+ SourceLocation TypenameLoc,
CXXScopeSpec &SS,
UnqualifiedId &Name,
- AttributeList *AttrList,
- bool HasTypenameKeyword,
- SourceLocation TypenameLoc) {
+ SourceLocation EllipsisLoc,
+ AttributeList *AttrList) {
assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
+ if (SS.isEmpty()) {
+ Diag(Name.getLocStart(), diag::err_using_requires_qualname);
+ return nullptr;
+ }
+
switch (Name.getKind()) {
case UnqualifiedId::IK_ImplicitSelfParam:
case UnqualifiedId::IK_Identifier:
@@ -7848,21 +8583,30 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S,
return nullptr;
// Warn about access declarations.
- if (!HasUsingKeyword) {
+ if (UsingLoc.isInvalid()) {
Diag(Name.getLocStart(),
getLangOpts().CPlusPlus11 ? diag::err_access_decl
: diag::warn_access_decl_deprecated)
<< FixItHint::CreateInsertion(SS.getRange().getBegin(), "using ");
}
- if (DiagnoseUnexpandedParameterPack(SS, UPPC_UsingDeclaration) ||
- DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration))
- return nullptr;
+ if (EllipsisLoc.isInvalid()) {
+ if (DiagnoseUnexpandedParameterPack(SS, UPPC_UsingDeclaration) ||
+ DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration))
+ return nullptr;
+ } else {
+ if (!SS.getScopeRep()->containsUnexpandedParameterPack() &&
+ !TargetNameInfo.containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(SS.getBeginLoc(), TargetNameInfo.getEndLoc());
+ EllipsisLoc = SourceLocation();
+ }
+ }
- NamedDecl *UD = BuildUsingDeclaration(S, AS, UsingLoc, SS,
- TargetNameInfo, AttrList,
- /* IsInstantiation */ false,
- HasTypenameKeyword, TypenameLoc);
+ NamedDecl *UD =
+ BuildUsingDeclaration(S, AS, UsingLoc, TypenameLoc.isValid(), TypenameLoc,
+ SS, TargetNameInfo, EllipsisLoc, AttrList,
+ /*IsInstantiation*/false);
if (UD)
PushOnScopeChains(UD, S, /*AddToContext*/ false);
@@ -7925,6 +8669,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
diag::err_using_decl_nested_name_specifier_is_current_class)
<< Using->getQualifierLoc().getSourceRange();
Diag(Orig->getLocation(), diag::note_using_decl_target);
+ Using->setInvalidDecl();
return true;
}
@@ -7934,6 +8679,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
<< cast<CXXRecordDecl>(CurContext)
<< Using->getQualifierLoc().getSourceRange();
Diag(Orig->getLocation(), diag::note_using_decl_target);
+ Using->setInvalidDecl();
return true;
}
}
@@ -7957,7 +8703,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
// We can have UsingDecls in our Previous results because we use the same
// LookupResult for checking whether the UsingDecl itself is a valid
// redeclaration.
- if (isa<UsingDecl>(D))
+ if (isa<UsingDecl>(D) || isa<UsingPackDecl>(D))
continue;
if (IsEquivalentForUsingDecl(Context, D, Target)) {
@@ -8003,6 +8749,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
Diag(Target->getLocation(), diag::note_using_decl_target);
Diag(OldDecl->getLocation(), diag::note_using_decl_conflict);
+ Using->setInvalidDecl();
return true;
}
@@ -8015,6 +8762,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
Diag(Using->getLocation(), diag::err_using_decl_conflict);
Diag(Target->getLocation(), diag::note_using_decl_target);
Diag(Tag->getLocation(), diag::note_using_decl_conflict);
+ Using->setInvalidDecl();
return true;
}
@@ -8024,6 +8772,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
Diag(Using->getLocation(), diag::err_using_decl_conflict);
Diag(Target->getLocation(), diag::note_using_decl_target);
Diag(NonTag->getLocation(), diag::note_using_decl_conflict);
+ Using->setInvalidDecl();
return true;
}
@@ -8231,23 +8980,19 @@ private:
/// the lookup differently for these declarations.
NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
SourceLocation UsingLoc,
+ bool HasTypenameKeyword,
+ SourceLocation TypenameLoc,
CXXScopeSpec &SS,
DeclarationNameInfo NameInfo,
+ SourceLocation EllipsisLoc,
AttributeList *AttrList,
- bool IsInstantiation,
- bool HasTypenameKeyword,
- SourceLocation TypenameLoc) {
+ bool IsInstantiation) {
assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
SourceLocation IdentLoc = NameInfo.getLoc();
assert(IdentLoc.isValid() && "Invalid TargetName location.");
// FIXME: We ignore attributes for now.
- if (SS.isEmpty()) {
- Diag(IdentLoc, diag::err_using_requires_qualname);
- return nullptr;
- }
-
// For an inheriting constructor declaration, the name of the using
// declaration is the name of a constructor in this class, not in the
// base class.
@@ -8281,8 +9026,23 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
F.done();
} else {
assert(IsInstantiation && "no scope in non-instantiation");
- assert(CurContext->isRecord() && "scope not record in instantiation");
- LookupQualifiedName(Previous, CurContext);
+ if (CurContext->isRecord())
+ LookupQualifiedName(Previous, CurContext);
+ else {
+ // No redeclaration check is needed here; in non-member contexts we
+ // diagnosed all possible conflicts with other using-declarations when
+ // building the template:
+ //
+ // For a dependent non-type using declaration, the only valid case is
+ // if we instantiate to a single enumerator. We check for conflicts
+ // between shadow declarations we introduce, and we check in the template
+ // definition for conflicts between a non-type using declaration and any
+ // other declaration, which together covers all cases.
+ //
+ // A dependent typename using declaration will never successfully
+ // instantiate, since it will always name a class member, so we reject
+ // that in the template definition.
+ }
}
// Check for invalid redeclarations.
@@ -8291,22 +9051,24 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
return nullptr;
// Check for bad qualifiers.
- if (CheckUsingDeclQualifier(UsingLoc, SS, NameInfo, IdentLoc))
+ if (CheckUsingDeclQualifier(UsingLoc, HasTypenameKeyword, SS, NameInfo,
+ IdentLoc))
return nullptr;
DeclContext *LookupContext = computeDeclContext(SS);
NamedDecl *D;
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
- if (!LookupContext) {
+ if (!LookupContext || EllipsisLoc.isValid()) {
if (HasTypenameKeyword) {
// FIXME: not all declaration name kinds are legal here
D = UnresolvedUsingTypenameDecl::Create(Context, CurContext,
UsingLoc, TypenameLoc,
QualifierLoc,
- IdentLoc, NameInfo.getName());
+ IdentLoc, NameInfo.getName(),
+ EllipsisLoc);
} else {
D = UnresolvedUsingValueDecl::Create(Context, CurContext, UsingLoc,
- QualifierLoc, NameInfo);
+ QualifierLoc, NameInfo, EllipsisLoc);
}
D->setAccess(AS);
CurContext->addDecl(D);
@@ -8466,6 +9228,19 @@ NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
return UD;
}
+NamedDecl *Sema::BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
+ ArrayRef<NamedDecl *> Expansions) {
+ assert(isa<UnresolvedUsingValueDecl>(InstantiatedFrom) ||
+ isa<UnresolvedUsingTypenameDecl>(InstantiatedFrom) ||
+ isa<UsingPackDecl>(InstantiatedFrom));
+
+ auto *UPD =
+ UsingPackDecl::Create(Context, CurContext, InstantiatedFrom, Expansions);
+ UPD->setAccess(InstantiatedFrom->getAccess());
+ CurContext->addDecl(UPD);
+ return UPD;
+}
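+
+// For example (user-level sketch): instantiating
+//
+//   template <typename... Ts> struct Overloaded : Ts... {
+//     using Ts::operator()...;
+//   };
+//
+// produces one UsingPackDecl per specialization, wrapping the UsingDecl
+// expanded for each element of Ts.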
+
/// Additional checks for a using declaration referring to a constructor name.
bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
assert(!UD->hasTypename() && "expecting a constructor name");
@@ -8502,6 +9277,8 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Prev) {
+ NestedNameSpecifier *Qual = SS.getScopeRep();
+
// C++03 [namespace.udecl]p8:
// C++0x [namespace.udecl]p10:
// A using-declaration is a declaration and can therefore be used
@@ -8509,10 +9286,28 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
// allowed.
//
// That's in non-member contexts.
- if (!CurContext->getRedeclContext()->isRecord())
+ if (!CurContext->getRedeclContext()->isRecord()) {
+ // A dependent qualifier outside a class can only ever resolve to an
+ // enumeration type. Therefore it conflicts with any other non-type
+ // declaration in the same scope.
+ // FIXME: How should we check for dependent type-type conflicts at block
+ // scope?
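+    // For example (hypothetical): in
+    //   template <typename T> void f() { int n; using T::n; }
+    // T::n could only ever instantiate to an enumerator, which would
+    // conflict with the local `int n`, so we diagnose the redefinition
+    // eagerly here.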
+ if (Qual->isDependent() && !HasTypenameKeyword) {
+ for (auto *D : Prev) {
+ if (!isa<TypeDecl>(D) && !isa<UsingDecl>(D) && !isa<UsingPackDecl>(D)) {
+ bool OldCouldBeEnumerator =
+ isa<UnresolvedUsingValueDecl>(D) || isa<EnumConstantDecl>(D);
+ Diag(NameLoc,
+ OldCouldBeEnumerator ? diag::err_redefinition
+ : diag::err_redefinition_different_kind)
+ << Prev.getLookupName();
+ Diag(D->getLocation(), diag::note_previous_definition);
+ return true;
+ }
+ }
+ }
return false;
-
- NestedNameSpecifier *Qual = SS.getScopeRep();
+ }
for (LookupResult::iterator I = Prev.begin(), E = Prev.end(); I != E; ++I) {
NamedDecl *D = *I;
@@ -8556,6 +9351,7 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
/// in the current context is appropriately related to the current
/// scope. If an error is found, diagnoses it and returns true.
bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
+ bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc) {
@@ -8566,9 +9362,11 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
// C++0x [namespace.udecl]p8:
// A using-declaration for a class member shall be a member-declaration.
- // If we weren't able to compute a valid scope, it must be a
- // dependent class scope.
- if (!NamedContext || NamedContext->getRedeclContext()->isRecord()) {
+ // If we weren't able to compute a valid scope, it might validly be a
+  // dependent class scope or the scope of a dependent enumeration. If
+ // we have a 'typename' keyword, the scope must resolve to a class type.
+ if ((HasTypename && !NamedContext) ||
+ (NamedContext && NamedContext->getRedeclContext()->isRecord())) {
auto *RD = NamedContext
? cast<CXXRecordDecl>(NamedContext->getRedeclContext())
: nullptr;
@@ -8628,7 +9426,8 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
if (getLangOpts().CPlusPlus11) {
// Convert 'using X::Y;' to 'auto &Y = X::Y;'.
FixIt = FixItHint::CreateReplacement(
- UsingLoc, "constexpr auto " + NameInfo.getName().getAsString() + " = ");
+ UsingLoc,
+ "constexpr auto " + NameInfo.getName().getAsString() + " = ");
}
Diag(UsingLoc, diag::note_using_decl_class_member_workaround)
@@ -8638,7 +9437,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
return true;
}
- // Otherwise, everything is known to be fine.
+ // Otherwise, this might be valid.
return false;
}
@@ -8683,11 +9482,13 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
return true;
}
- Diag(SS.getRange().getBegin(),
- diag::err_using_decl_nested_name_specifier_is_not_base_class)
- << SS.getScopeRep()
- << cast<CXXRecordDecl>(CurContext)
- << SS.getRange();
+ if (!cast<CXXRecordDecl>(NamedContext)->isInvalidDecl()) {
+ Diag(SS.getRange().getBegin(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << SS.getScopeRep()
+ << cast<CXXRecordDecl>(CurContext)
+ << SS.getRange();
+ }
return true;
}
@@ -9493,7 +10294,11 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
Scope *S = getScopeForContext(ClassDecl);
CheckImplicitSpecialMemberDeclaration(S, Destructor);
- if (ShouldDeleteSpecialMember(Destructor, CXXDestructor))
+ // We can't check whether an implicit destructor is deleted before we complete
+ // the definition of the class, because its validity depends on the alignment
+ // of the class. We'll check this from ActOnFields once the class is complete.
+ if (ClassDecl->isCompleteDefinition() &&
+ ShouldDeleteSpecialMember(Destructor, CXXDestructor))
SetDeclDeleted(Destructor, ClassLoc);
// Introduce this destructor into its scope.
@@ -9560,7 +10365,7 @@ void Sema::ActOnFinishCXXMemberDecls() {
}
}
-static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
+static void checkDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
// Don't do anything for template patterns.
if (Class->getDescribedClassTemplate())
return;
@@ -9574,7 +10379,7 @@ static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
if (!CD) {
// Recurse on nested classes.
if (auto *NestedRD = dyn_cast<CXXRecordDecl>(Member))
- getDefaultArgExprsForConstructors(S, NestedRD);
+ checkDefaultArgExprsForConstructors(S, NestedRD);
continue;
} else if (!CD->isDefaultConstructor() || !CD->hasAttr<DLLExportAttr>()) {
continue;
@@ -9599,14 +10404,9 @@ static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
LastExportedDefaultCtor = CD;
for (unsigned I = 0; I != NumParams; ++I) {
- // Skip any default arguments that we've already instantiated.
- if (S.Context.getDefaultArgExprForConstructor(CD, I))
- continue;
-
- Expr *DefaultArg = S.BuildCXXDefaultArgExpr(Class->getLocation(), CD,
- CD->getParamDecl(I)).get();
+ (void)S.CheckCXXDefaultArgExpr(Class->getLocation(), CD,
+ CD->getParamDecl(I));
S.DiscardCleanupsInEvaluationContext();
- S.Context.addDefaultArgExprForConstructor(CD, I, DefaultArg);
}
}
}
@@ -9618,7 +10418,7 @@ void Sema::ActOnFinishCXXNonNestedClass(Decl *D) {
// have default arguments or don't use the standard calling convention are
// wrapped with a thunk called the default constructor closure.
if (RD && Context.getTargetInfo().getCXXABI().isMicrosoft())
- getDefaultArgExprsForConstructors(*this, RD);
+ checkDefaultArgExprsForConstructors(*this, RD);
referenceDLLExportedClassMethods();
}
@@ -11506,6 +12306,8 @@ Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
DeclInitType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) &&
"given constructor for wrong type");
MarkFunctionReferenced(ConstructLoc, Constructor);
+ if (getLangOpts().CUDA && !CheckCUDACall(ConstructLoc, Constructor))
+ return ExprError();
return CXXConstructExpr::Create(
Context, DeclInitType, ConstructLoc, Constructor, Elidable,
@@ -11534,13 +12336,20 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
// Lookup can return at most two results: the pattern for the field, or the
// injected class name of the parent record. No other member can have the
// same name as the field.
- assert(!Lookup.empty() && Lookup.size() <= 2 &&
+ // In modules mode, lookup can return multiple results (coming from
+ // different modules).
+ assert((getLangOpts().Modules || (!Lookup.empty() && Lookup.size() <= 2)) &&
"more than two lookup results for field name");
FieldDecl *Pattern = dyn_cast<FieldDecl>(Lookup[0]);
if (!Pattern) {
assert(isa<CXXRecordDecl>(Lookup[0]) &&
"cannot have other non-field member with same name");
- Pattern = cast<FieldDecl>(Lookup[1]);
+ for (auto L : Lookup)
+ if (isa<FieldDecl>(L)) {
+ Pattern = cast<FieldDecl>(L);
+ break;
+ }
+ assert(Pattern && "We must have set the Pattern!");
}
if (InstantiateInClassInitializer(Loc, Field, Pattern,
@@ -11564,14 +12373,9 @@ ExprResult Sema::BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field) {
// constructor before the initializer is lexically complete will ultimately
// come here at which point we can diagnose it.
RecordDecl *OutermostClass = ParentRD->getOuterLexicalRecordContext();
- if (OutermostClass == ParentRD) {
- Diag(Field->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed)
- << ParentRD << Field;
- } else {
- Diag(Field->getLocEnd(),
- diag::err_in_class_initializer_not_yet_parsed_outer_class)
- << ParentRD << OutermostClass << Field;
- }
+ Diag(Loc, diag::err_in_class_initializer_not_yet_parsed)
+ << OutermostClass << Field;
+ Diag(Field->getLocEnd(), diag::note_in_class_initializer_not_yet_parsed);
return ExprError();
}
@@ -11963,6 +12767,9 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
if (FnDecl->isExternC()) {
Diag(FnDecl->getLocation(), diag::err_literal_operator_extern_c);
+ if (const LinkageSpecDecl *LSD =
+ FnDecl->getDeclContext()->getExternCContext())
+ Diag(LSD->getExternLoc(), diag::note_extern_c_begins_here);
return true;
}
@@ -12106,7 +12913,7 @@ bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
// Literal suffix identifiers that do not start with an underscore
// are reserved for future standardization.
Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved)
- << NumericLiteralParser::isValidUDSuffix(getLangOpts(), LiteralName);
+ << StringLiteralParser::isValidUDSuffix(getLangOpts(), LiteralName);
}
return false;
@@ -12997,9 +13804,13 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
// and shall be the only declaration of the function or function
// template in the translation unit.
if (functionDeclHasDefaultArgument(FD)) {
- if (FunctionDecl *OldFD = FD->getPreviousDecl()) {
+ // We can't look at FD->getPreviousDecl() because it may not have been set
+ // if we're in a dependent context. If the function is known to be a
+ // redeclaration, we will have narrowed Previous down to the right decl.
+ if (D.isRedeclaration()) {
Diag(FD->getLocation(), diag::err_friend_decl_with_def_arg_redeclared);
- Diag(OldFD->getLocation(), diag::note_previous_declaration);
+ Diag(Previous.getRepresentativeDecl()->getLocation(),
+ diag::note_previous_declaration);
} else if (!D.isFunctionDefinition())
Diag(FD->getLocation(), diag::err_friend_decl_with_def_arg_must_be_def);
}
@@ -13052,7 +13863,7 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
// See if we're deleting a function which is already known to override a
// non-deleted virtual function.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn)) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn)) {
bool IssuedDiagnostic = false;
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
E = MD->end_overridden_methods();
@@ -13065,6 +13876,11 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
Diag((*I)->getLocation(), diag::note_overridden_virtual_function);
}
}
+ // If this function was implicitly deleted because it was defaulted,
+ // explain why it was deleted.
+ if (IssuedDiagnostic && MD->isDefaulted())
+ ShouldDeleteSpecialMember(MD, getSpecialMember(MD), nullptr,
+ /*Diagnose*/true);
}
// C++11 [basic.start.main]p3:
@@ -13072,6 +13888,9 @@ void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
if (Fn->isMain())
Diag(DelLoc, diag::err_deleted_main);
+ // C++11 [dcl.fct.def.delete]p4:
+ // A deleted function is implicitly inline.
+ Fn->setImplicitlyInline();
Fn->setDeletedAsWritten();
}
@@ -13461,6 +14280,8 @@ bool Sema::DefineUsedVTables() {
CXXRecordDecl *Class = VTableUses[I].first->getDefinition();
if (!Class)
continue;
+ TemplateSpecializationKind ClassTSK =
+ Class->getTemplateSpecializationKind();
SourceLocation Loc = VTableUses[I].second;
@@ -13484,9 +14305,8 @@ bool Sema::DefineUsedVTables() {
// of an explicit instantiation declaration, suppress the
// vtable; it will live with the explicit instantiation
// definition.
- bool IsExplicitInstantiationDeclaration
- = Class->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDeclaration;
+ bool IsExplicitInstantiationDeclaration =
+ ClassTSK == TSK_ExplicitInstantiationDeclaration;
for (auto R : Class->redecls()) {
TemplateSpecializationKind TSK
= cast<CXXRecordDecl>(R)->getTemplateSpecializationKind();
@@ -13519,17 +14339,20 @@ bool Sema::DefineUsedVTables() {
if (VTablesUsed[Canonical])
Consumer.HandleVTable(Class);
- // Optionally warn if we're emitting a weak vtable.
- if (Class->isExternallyVisible() &&
- Class->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) {
+ // Warn if we're emitting a weak vtable. The vtable will be weak if there is
+ // no key function or the key function is inlined. Don't warn in C++ ABIs
+ // that lack key functions, since the user won't be able to make one.
+ if (Context.getTargetInfo().getCXXABI().hasKeyFunctions() &&
+ Class->isExternallyVisible() && ClassTSK != TSK_ImplicitInstantiation) {
const FunctionDecl *KeyFunctionDef = nullptr;
- if (!KeyFunction ||
- (KeyFunction->hasBody(KeyFunctionDef) &&
- KeyFunctionDef->isInlined()))
- Diag(Class->getLocation(), Class->getTemplateSpecializationKind() ==
- TSK_ExplicitInstantiationDefinition
- ? diag::warn_weak_template_vtable : diag::warn_weak_vtable)
- << Class;
+ if (!KeyFunction || (KeyFunction->hasBody(KeyFunctionDef) &&
+ KeyFunctionDef->isInlined())) {
+ Diag(Class->getLocation(),
+ ClassTSK == TSK_ExplicitInstantiationDefinition
+ ? diag::warn_weak_template_vtable
+ : diag::warn_weak_vtable)
+ << Class;
+ }
}
}
VTableUses.clear();
diff --git a/lib/Sema/SemaDeclObjC.cpp b/lib/Sema/SemaDeclObjC.cpp
index 738de77cecb7..d172c951e749 100644
--- a/lib/Sema/SemaDeclObjC.cpp
+++ b/lib/Sema/SemaDeclObjC.cpp
@@ -11,22 +11,22 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
+#include "TypeLocBuilder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
-#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "TypeLocBuilder.h"
using namespace clang;
@@ -209,11 +209,11 @@ bool Sema::CheckARCMethodDecl(ObjCMethodDecl *method) {
if (!Context.hasSameType(method->getReturnType(), Context.VoidTy)) {
SourceRange ResultTypeRange = method->getReturnTypeSourceRange();
if (ResultTypeRange.isInvalid())
- Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+ Diag(method->getLocation(), diag::err_dealloc_bad_result_type)
<< method->getReturnType()
<< FixItHint::CreateInsertion(method->getSelectorLoc(0), "(void)");
else
- Diag(method->getLocation(), diag::error_dealloc_bad_result_type)
+ Diag(method->getLocation(), diag::err_dealloc_bad_result_type)
<< method->getReturnType()
<< FixItHint::CreateReplacement(ResultTypeRange, "void");
return true;
@@ -1028,6 +1028,7 @@ ActOnStartClassInterface(Scope *S, SourceLocation AtInterfaceLoc,
/// typedef'ed use for a qualified super class and adds them to the list
/// of the protocols.
void Sema::ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
+ SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc) {
if (!SuperName)
@@ -1040,8 +1041,14 @@ void Sema::ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
if (const TypedefNameDecl *TDecl = dyn_cast_or_null<TypedefNameDecl>(IDecl)) {
QualType T = TDecl->getUnderlyingType();
if (T->isObjCObjectType())
- if (const ObjCObjectType *OPT = T->getAs<ObjCObjectType>())
+ if (const ObjCObjectType *OPT = T->getAs<ObjCObjectType>()) {
ProtocolRefs.append(OPT->qual_begin(), OPT->qual_end());
+ // FIXME: Consider whether this should be an invalid loc since the loc
+ // is not actually pointing to a protocol name reference but to the
+ // typedef reference. Note that the base class name loc is also pointing
+ // at the typedef.
+ ProtocolLocs.append(OPT->getNumProtocols(), SuperLoc);
+ }
}
}
@@ -2353,7 +2360,7 @@ static bool CheckMethodOverrideParam(Sema &S,
}
if (S.Context.hasSameUnqualifiedType(ImplTy, IfaceTy))
return true;
-
+
if (!Warn)
return false;
unsigned DiagID =
@@ -2741,7 +2748,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
} else {
ObjCMethodDecl *ImpMethodDecl =
IMPDecl->getInstanceMethod(I->getSelector());
- assert(CDecl->getInstanceMethod(I->getSelector()) &&
+      assert(CDecl->getInstanceMethod(I->getSelector(), /*AllowHidden=*/true) &&
"Expected to find the method through lookup as well");
// ImpMethodDecl may be null as in a @dynamic property.
if (ImpMethodDecl) {
@@ -2767,7 +2774,7 @@ void Sema::MatchAllMethodDeclarations(const SelectorSet &InsMap,
} else {
ObjCMethodDecl *ImpMethodDecl =
IMPDecl->getClassMethod(I->getSelector());
- assert(CDecl->getClassMethod(I->getSelector()) &&
+      assert(CDecl->getClassMethod(I->getSelector(), /*AllowHidden=*/true) &&
"Expected to find the method through lookup as well");
// ImpMethodDecl may be null as in a @dynamic property.
if (ImpMethodDecl) {
@@ -3217,7 +3224,7 @@ void Sema::addMethodToGlobalList(ObjCMethodList *List,
ObjCMethodList *ListWithSameDeclaration = nullptr;
for (; List; Previous = List, List = List->getNext()) {
// If we are building a module, keep all of the methods.
- if (getLangOpts().CompilingModule)
+ if (getLangOpts().isCompilingModule())
continue;
bool SameDeclaration = MatchTwoMethodDeclarations(Method,
@@ -3853,6 +3860,18 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
Diag(IDecl->getLocation(), diag::err_objc_root_class_subclass);
}
+ if (const ObjCInterfaceDecl *Super = IDecl->getSuperClass()) {
+    // An interface can subclass another interface with an
+    // objc_subclassing_restricted attribute when it has that attribute as
+ // well (because of interfaces imported from Swift). Therefore we have
+ // to check if we can subclass in the implementation as well.
+ if (IDecl->hasAttr<ObjCSubclassingRestrictedAttr>() &&
+ Super->hasAttr<ObjCSubclassingRestrictedAttr>()) {
+ Diag(IC->getLocation(), diag::err_restricted_superclass_mismatch);
+ Diag(Super->getLocation(), diag::note_class_declared);
+ }
+ }
+
if (LangOpts.ObjCRuntime.isNonFragile()) {
while (IDecl->getSuperClass()) {
DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass());
@@ -3873,6 +3892,14 @@ Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods,
ImplMethodsVsClassMethods(S, CatImplClass, Cat);
}
}
+ } else if (const auto *IntfDecl = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
+ if (const ObjCInterfaceDecl *Super = IntfDecl->getSuperClass()) {
+ if (!IntfDecl->hasAttr<ObjCSubclassingRestrictedAttr>() &&
+ Super->hasAttr<ObjCSubclassingRestrictedAttr>()) {
+ Diag(IntfDecl->getLocation(), diag::err_restricted_superclass_mismatch);
+ Diag(Super->getLocation(), diag::note_class_declared);
+ }
+ }
}
if (isInterfaceDeclKind) {
// Reject invalid vardecls.
@@ -4290,7 +4317,7 @@ Decl *Sema::ActOnMethodDeclaration(
bool isVariadic, bool MethodDefinition) {
// Make sure we can establish a context for the method.
if (!CurContext->isObjCContainer()) {
- Diag(MethodLoc, diag::error_missing_method_context);
+ Diag(MethodLoc, diag::err_missing_method_context);
return nullptr;
}
ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
diff --git a/lib/Sema/SemaExceptionSpec.cpp b/lib/Sema/SemaExceptionSpec.cpp
index 4a21eb308fe5..2ac2aca6f660 100644
--- a/lib/Sema/SemaExceptionSpec.cpp
+++ b/lib/Sema/SemaExceptionSpec.cpp
@@ -43,22 +43,36 @@ bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
auto *RD = dyn_cast<CXXRecordDecl>(CurContext);
// All the problem cases are member functions named "swap" within class
- // templates declared directly within namespace std.
- if (!RD || RD->getEnclosingNamespaceContext() != getStdNamespace() ||
- !RD->getIdentifier() || !RD->getDescribedClassTemplate() ||
+ // templates declared directly within namespace std or std::__debug or
+ // std::__profile.
+ if (!RD || !RD->getIdentifier() || !RD->getDescribedClassTemplate() ||
!D.getIdentifier() || !D.getIdentifier()->isStr("swap"))
return false;
+ auto *ND = dyn_cast<NamespaceDecl>(RD->getDeclContext());
+ if (!ND)
+ return false;
+
+ bool IsInStd = ND->isStdNamespace();
+ if (!IsInStd) {
+ // This isn't a direct member of namespace std, but it might still be
+ // libstdc++'s std::__debug::array or std::__profile::array.
+ IdentifierInfo *II = ND->getIdentifier();
+ if (!II || !(II->isStr("__debug") || II->isStr("__profile")) ||
+ !ND->isInStdNamespace())
+ return false;
+ }
+
// Only apply this hack within a system header.
if (!Context.getSourceManager().isInSystemHeader(D.getLocStart()))
return false;
return llvm::StringSwitch<bool>(RD->getIdentifier()->getName())
.Case("array", true)
- .Case("pair", true)
- .Case("priority_queue", true)
- .Case("stack", true)
- .Case("queue", true)
+ .Case("pair", IsInStd)
+ .Case("priority_queue", IsInStd)
+ .Case("stack", IsInStd)
+ .Case("queue", IsInStd)
.Default(false);
}
@@ -129,6 +143,11 @@ bool Sema::CheckSpecifiedExceptionType(QualType &T, SourceRange Range) {
/// to member to a function with an exception specification. This means that
/// it is invalid to add another level of indirection.
bool Sema::CheckDistantExceptionSpec(QualType T) {
+ // C++17 removes this rule in favor of putting exception specifications into
+ // the type system.
+ if (getLangOpts().CPlusPlus1z)
+ return false;
+
if (const PointerType *PT = T->getAs<PointerType>())
T = PT->getPointeeType();
else if (const MemberPointerType *PT = T->getAs<MemberPointerType>())
@@ -188,6 +207,14 @@ Sema::UpdateExceptionSpec(FunctionDecl *FD,
Context.adjustExceptionSpec(cast<FunctionDecl>(Redecl), ESI);
}
+static bool CheckEquivalentExceptionSpecImpl(
+ Sema &S, const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Old, SourceLocation OldLoc,
+ const FunctionProtoType *New, SourceLocation NewLoc,
+ bool *MissingExceptionSpecification = nullptr,
+ bool *MissingEmptyExceptionSpecification = nullptr,
+ bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false);
+
/// Determine whether a function has an implicitly-generated exception
/// specification.
static bool hasImplicitExceptionSpec(FunctionDecl *Decl) {
@@ -210,6 +237,12 @@ static bool hasImplicitExceptionSpec(FunctionDecl *Decl) {
}
bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
+ // Just completely ignore this under -fno-exceptions prior to C++1z.
+  // From C++1z onwards, the exception specification is part of the type and
+ // we will diagnose mismatches anyway, so it's better to check for them here.
+ if (!getLangOpts().CXXExceptions && !getLangOpts().CPlusPlus1z)
+ return false;
+
OverloadedOperatorKind OO = New->getDeclName().getCXXOverloadedOperator();
bool IsOperatorNew = OO == OO_New || OO == OO_Array_New;
bool MissingExceptionSpecification = false;
@@ -224,8 +257,8 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// Check the types as written: they must match before any exception
// specification adjustment is applied.
- if (!CheckEquivalentExceptionSpec(
- PDiag(DiagID), PDiag(diag::note_previous_declaration),
+ if (!CheckEquivalentExceptionSpecImpl(
+ *this, PDiag(DiagID), PDiag(diag::note_previous_declaration),
Old->getType()->getAs<FunctionProtoType>(), Old->getLocation(),
New->getType()->getAs<FunctionProtoType>(), New->getLocation(),
&MissingExceptionSpecification, &MissingEmptyExceptionSpecification,
@@ -234,7 +267,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// If a declaration of a function has an implicit
// exception-specification, other declarations of the function shall
// not specify an exception-specification.
- if (getLangOpts().CPlusPlus11 &&
+ if (getLangOpts().CPlusPlus11 && getLangOpts().CXXExceptions &&
hasImplicitExceptionSpec(Old) != hasImplicitExceptionSpec(New)) {
Diag(New->getLocation(), diag::ext_implicit_exception_spec_mismatch)
<< hasImplicitExceptionSpec(Old);
@@ -255,14 +288,15 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// The new function declaration is only missing an empty exception
// specification "throw()". If the throw() specification came from a
// function in a system header that has C linkage, just add an empty
- // exception specification to the "new" declaration. This is an
- // egregious workaround for glibc, which adds throw() specifications
- // to many libc functions as an optimization. Unfortunately, that
- // optimization isn't permitted by the C++ standard, so we're forced
- // to work around it here.
+ // exception specification to the "new" declaration. Note that C library
+ // implementations are permitted to add these nothrow exception
+ // specifications.
+ //
+ // Likewise if the old function is a builtin.
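+ // For example, glibc declares atoi with an empty throw() specification
+ // (via its __THROW macro); a plain user redeclaration of atoi picks up
+ // that specification here.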
if (MissingEmptyExceptionSpecification && NewProto &&
(Old->getLocation().isInvalid() ||
- Context.getSourceManager().isInSystemHeader(Old->getLocation())) &&
+ Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
+ Old->getBuiltinID()) &&
Old->isExternC()) {
New->setType(Context.getFunctionType(
NewProto->getReturnType(), NewProto->getParamTypes(),
@@ -376,11 +410,15 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
bool Sema::CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc) {
+ if (!getLangOpts().CXXExceptions)
+ return false;
+
unsigned DiagID = diag::err_mismatched_exception_spec;
if (getLangOpts().MicrosoftExt)
DiagID = diag::ext_mismatched_exception_spec;
- bool Result = CheckEquivalentExceptionSpec(PDiag(DiagID),
- PDiag(diag::note_previous_declaration), Old, OldLoc, New, NewLoc);
+ bool Result = CheckEquivalentExceptionSpecImpl(
+ *this, PDiag(DiagID), PDiag(diag::note_previous_declaration),
+ Old, OldLoc, New, NewLoc);
// In Microsoft mode, mismatching exception specifications just cause a warning.
if (getLangOpts().MicrosoftExt)
@@ -394,30 +432,23 @@ bool Sema::CheckEquivalentExceptionSpec(
/// \return \c false if the exception specifications match, \c true if there is
/// a problem. If \c true is returned, either a diagnostic has already been
/// produced or \c *MissingExceptionSpecification is set to \c true.
-bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
- const PartialDiagnostic & NoteID,
- const FunctionProtoType *Old,
- SourceLocation OldLoc,
- const FunctionProtoType *New,
- SourceLocation NewLoc,
- bool *MissingExceptionSpecification,
- bool*MissingEmptyExceptionSpecification,
- bool AllowNoexceptAllMatchWithNoSpec,
- bool IsOperatorNew) {
- // Just completely ignore this under -fno-exceptions.
- if (!getLangOpts().CXXExceptions)
- return false;
-
+static bool CheckEquivalentExceptionSpecImpl(
+ Sema &S, const PartialDiagnostic &DiagID, const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Old, SourceLocation OldLoc,
+ const FunctionProtoType *New, SourceLocation NewLoc,
+ bool *MissingExceptionSpecification,
+ bool *MissingEmptyExceptionSpecification,
+ bool AllowNoexceptAllMatchWithNoSpec, bool IsOperatorNew) {
if (MissingExceptionSpecification)
*MissingExceptionSpecification = false;
if (MissingEmptyExceptionSpecification)
*MissingEmptyExceptionSpecification = false;
- Old = ResolveExceptionSpec(NewLoc, Old);
+ Old = S.ResolveExceptionSpec(NewLoc, Old);
if (!Old)
return false;
- New = ResolveExceptionSpec(NewLoc, New);
+ New = S.ResolveExceptionSpec(NewLoc, New);
if (!New)
return false;
@@ -451,8 +482,8 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
if (OldEST == EST_None && NewEST == EST_None)
return false;
- FunctionProtoType::NoexceptResult OldNR = Old->getNoexceptSpec(Context);
- FunctionProtoType::NoexceptResult NewNR = New->getNoexceptSpec(Context);
+ FunctionProtoType::NoexceptResult OldNR = Old->getNoexceptSpec(S.Context);
+ FunctionProtoType::NoexceptResult NewNR = New->getNoexceptSpec(S.Context);
if (OldNR == FunctionProtoType::NR_BadNoexcept ||
NewNR == FunctionProtoType::NR_BadNoexcept)
return false;
@@ -467,9 +498,9 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
if (OldNR != NewNR &&
OldNR != FunctionProtoType::NR_NoNoexcept &&
NewNR != FunctionProtoType::NR_NoNoexcept) {
- Diag(NewLoc, DiagID);
+ S.Diag(NewLoc, DiagID);
if (NoteID.getDiagID() != 0 && OldLoc.isValid())
- Diag(OldLoc, NoteID);
+ S.Diag(OldLoc, NoteID);
return true;
}
@@ -507,7 +538,7 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
// As a special compatibility feature, under C++0x we accept no spec and
// throw(std::bad_alloc) as equivalent for operator new and operator new[].
// This is because the implicit declaration changed, but old code would otherwise break.
- if (getLangOpts().CPlusPlus11 && IsOperatorNew) {
+ if (S.getLangOpts().CPlusPlus11 && IsOperatorNew) {
const FunctionProtoType *WithExceptions = nullptr;
if (OldEST == EST_None && NewEST == EST_Dynamic)
WithExceptions = New;
@@ -548,9 +579,9 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
return true;
}
- Diag(NewLoc, DiagID);
+ S.Diag(NewLoc, DiagID);
if (NoteID.getDiagID() != 0 && OldLoc.isValid())
- Diag(OldLoc, NoteID);
+ S.Diag(OldLoc, NoteID);
return true;
}
@@ -562,11 +593,11 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
// to the second.
llvm::SmallPtrSet<CanQualType, 8> OldTypes, NewTypes;
for (const auto &I : Old->exceptions())
- OldTypes.insert(Context.getCanonicalType(I).getUnqualifiedType());
+ OldTypes.insert(S.Context.getCanonicalType(I).getUnqualifiedType());
for (const auto &I : New->exceptions()) {
- CanQualType TypePtr = Context.getCanonicalType(I).getUnqualifiedType();
- if(OldTypes.count(TypePtr))
+ CanQualType TypePtr = S.Context.getCanonicalType(I).getUnqualifiedType();
+ if (OldTypes.count(TypePtr))
NewTypes.insert(TypePtr);
else
Success = false;
@@ -577,19 +608,34 @@ bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
if (Success) {
return false;
}
- Diag(NewLoc, DiagID);
+ S.Diag(NewLoc, DiagID);
if (NoteID.getDiagID() != 0 && OldLoc.isValid())
- Diag(OldLoc, NoteID);
+ S.Diag(OldLoc, NoteID);
return true;
}
+bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
+ const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Old,
+ SourceLocation OldLoc,
+ const FunctionProtoType *New,
+ SourceLocation NewLoc) {
+ if (!getLangOpts().CXXExceptions)
+ return false;
+ return CheckEquivalentExceptionSpecImpl(*this, DiagID, NoteID, Old, OldLoc,
+ New, NewLoc);
+}
+
/// CheckExceptionSpecSubset - Check whether the second function type's
/// exception specification is a subset (or equivalent) of the first function
/// type. This is used by override and pointer assignment checks.
-bool Sema::CheckExceptionSpecSubset(
- const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
- const FunctionProtoType *Superset, SourceLocation SuperLoc,
- const FunctionProtoType *Subset, SourceLocation SubLoc) {
+bool Sema::CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
+ const PartialDiagnostic &NestedDiagID,
+ const PartialDiagnostic &NoteID,
+ const FunctionProtoType *Superset,
+ SourceLocation SuperLoc,
+ const FunctionProtoType *Subset,
+ SourceLocation SubLoc) {
// Just auto-succeed under -fno-exceptions.
if (!getLangOpts().CXXExceptions)
@@ -613,7 +659,8 @@ bool Sema::CheckExceptionSpecSubset(
// If superset contains everything, we're done.
if (SuperEST == EST_None || SuperEST == EST_MSAny)
- return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+ return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
+ Subset, SubLoc);
// If there are dependent noexcept specs, assume everything is fine. Unlike
// with the equivalency check, this is safe in this case, because we don't
@@ -628,7 +675,8 @@ bool Sema::CheckExceptionSpecSubset(
// Another case of the superset containing everything.
if (SuperNR == FunctionProtoType::NR_Throw)
- return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+ return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
+ Subset, SubLoc);
ExceptionSpecificationType SubEST = Subset->getExceptionSpecType();
@@ -659,7 +707,8 @@ bool Sema::CheckExceptionSpecSubset(
// If the subset contains nothing, we're done.
if (SubEST == EST_DynamicNone || SubNR == FunctionProtoType::NR_Nothrow)
- return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+ return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
+ Subset, SubLoc);
// Otherwise, if the superset contains nothing, we've failed.
if (SuperEST == EST_DynamicNone || SuperNR == FunctionProtoType::NR_Nothrow) {
@@ -751,14 +800,15 @@ bool Sema::CheckExceptionSpecSubset(
}
}
// We've run half the gauntlet.
- return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+ return CheckParamExceptionSpec(NestedDiagID, NoteID, Superset, SuperLoc,
+ Subset, SubLoc);
}
-static bool CheckSpecForTypesEquivalent(Sema &S,
- const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
- QualType Target, SourceLocation TargetLoc,
- QualType Source, SourceLocation SourceLoc)
-{
+static bool
+CheckSpecForTypesEquivalent(Sema &S, const PartialDiagnostic &DiagID,
+ const PartialDiagnostic &NoteID, QualType Target,
+ SourceLocation TargetLoc, QualType Source,
+ SourceLocation SourceLoc) {
const FunctionProtoType *TFunc = GetUnderlyingFunction(Target);
if (!TFunc)
return false;
@@ -775,13 +825,16 @@ static bool CheckSpecForTypesEquivalent(Sema &S,
/// assignment and override compatibility check. We do not check the parameters
/// of parameter function pointers recursively, as no sane programmer would
/// even be able to write such a function type.
-bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &NoteID,
+bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &DiagID,
+ const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc) {
+ auto RetDiag = DiagID;
+ RetDiag << 0;
if (CheckSpecForTypesEquivalent(
- *this, PDiag(diag::err_deep_exception_specs_differ) << 0, PDiag(),
+ *this, RetDiag, PDiag(),
Target->getReturnType(), TargetLoc, Source->getReturnType(),
SourceLoc))
return true;
@@ -791,8 +844,10 @@ bool Sema::CheckParamExceptionSpec(const PartialDiagnostic &NoteID,
assert(Target->getNumParams() == Source->getNumParams() &&
"Functions have different argument counts.");
for (unsigned i = 0, E = Target->getNumParams(); i != E; ++i) {
+ auto ParamDiag = DiagID;
+ ParamDiag << 1;
if (CheckSpecForTypesEquivalent(
- *this, PDiag(diag::err_deep_exception_specs_differ) << 1, PDiag(),
+ *this, ParamDiag, PDiag(),
Target->getParamType(i), TargetLoc, Source->getParamType(i),
SourceLoc))
return true;
@@ -812,6 +867,16 @@ bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType) {
if (!FromFunc || FromFunc->hasDependentExceptionSpec())
return false;
+ unsigned DiagID = diag::err_incompatible_exception_specs;
+ unsigned NestedDiagID = diag::err_deep_exception_specs_differ;
+ // This is not an error in C++17 onwards, unless the noexceptness doesn't
+ // match, but in that case we have a full-on type mismatch, not just a
+ // type sugar mismatch.
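+ // For example, "void(*)() throw(int)" and "void(*)() throw(float)" are the
+ // same type in C++17 (neither is noexcept), so a mismatch between them is
+ // diagnosed as a warning rather than an error.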
+ if (getLangOpts().CPlusPlus1z) {
+ DiagID = diag::warn_incompatible_exception_specs;
+ NestedDiagID = diag::warn_deep_exception_specs_differ;
+ }
+
// Now we've got the correct types on both sides, check their compatibility.
// This means that the source of the conversion can only throw a subset of
// the exceptions of the target, and any exception specs on arguments or
@@ -824,10 +889,10 @@ bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType) {
// void (*q)(void (*) throw(int)) = p;
// }
// ... because it might be instantiated with T=int.
- return CheckExceptionSpecSubset(PDiag(diag::err_incompatible_exception_specs),
- PDiag(), ToFunc,
- From->getSourceRange().getBegin(),
- FromFunc, SourceLocation());
+ return CheckExceptionSpecSubset(PDiag(DiagID), PDiag(NestedDiagID), PDiag(),
+ ToFunc, From->getSourceRange().getBegin(),
+ FromFunc, SourceLocation()) &&
+ !getLangOpts().CPlusPlus1z;
}
bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
@@ -861,6 +926,7 @@ bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
if (getLangOpts().MicrosoftExt)
DiagID = diag::ext_override_exception_spec;
return CheckExceptionSpecSubset(PDiag(DiagID),
+ PDiag(diag::err_deep_exception_specs_differ),
PDiag(diag::note_overridden_virtual_function),
Old->getType()->getAs<FunctionProtoType>(),
Old->getLocation(),
@@ -879,19 +945,37 @@ static CanThrowResult canSubExprsThrow(Sema &S, const Expr *E) {
}
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D) {
- assert(D && "Expected decl");
-
- // See if we can get a function type from the decl somehow.
- const ValueDecl *VD = dyn_cast<ValueDecl>(D);
- if (!VD) // If we have no clue what we're calling, assume the worst.
- return CT_Can;
-
// As an extension, we assume that __attribute__((nothrow)) functions don't
// throw.
- if (isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
+ if (D && isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
return CT_Cannot;
- QualType T = VD->getType();
+ QualType T;
+
+ // In C++1z, just look at the function type of the callee.
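+ // For instance, given "void (*p)() noexcept;", a call "p()" is known not
+ // to throw even when no callee declaration is available.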
+ if (S.getLangOpts().CPlusPlus1z && isa<CallExpr>(E)) {
+ E = cast<CallExpr>(E)->getCallee();
+ T = E->getType();
+ if (T->isSpecificPlaceholderType(BuiltinType::BoundMember)) {
+ // Sadly we don't preserve the actual type as part of the "bound member"
+ // placeholder, so we need to reconstruct it.
+ E = E->IgnoreParenImpCasts();
+
+ // Could be a call to a pointer-to-member or a plain member access.
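+ // e.g. "(obj.*pmf)()" is a BinaryOperator, "obj.f()" is a MemberExpr.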
+ if (auto *Op = dyn_cast<BinaryOperator>(E)) {
+ assert(Op->getOpcode() == BO_PtrMemD || Op->getOpcode() == BO_PtrMemI);
+ T = Op->getRHS()->getType()
+ ->castAs<MemberPointerType>()->getPointeeType();
+ } else {
+ T = cast<MemberExpr>(E)->getMemberDecl()->getType();
+ }
+ }
+ } else if (const ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D))
+ T = VD->getType();
+ else
+ // If we have no clue what we're calling, assume the worst.
+ return CT_Can;
+
const FunctionProtoType *FT;
if ((FT = T->getAs<FunctionProtoType>())) {
} else if (const PointerType *PT = T->getAs<PointerType>())
@@ -983,10 +1067,8 @@ CanThrowResult Sema::canThrow(const Expr *E) {
CT = CT_Dependent;
else if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens()))
CT = CT_Cannot;
- else if (CE->getCalleeDecl())
- CT = canCalleeThrow(*this, E, CE->getCalleeDecl());
else
- CT = CT_Can;
+ CT = canCalleeThrow(*this, E, CE->getCalleeDecl());
if (CT == CT_Can)
return CT;
return mergeCanThrow(CT, canSubExprsThrow(*this, E));
@@ -1085,6 +1167,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::ExprWithCleanupsClass:
case Expr::ExtVectorElementExprClass:
case Expr::InitListExprClass:
+ case Expr::ArrayInitLoopExprClass:
case Expr::MemberExprClass:
case Expr::ObjCIsaExprClass:
case Expr::ObjCIvarRefExprClass:
@@ -1178,6 +1261,7 @@ CanThrowResult Sema::canThrow(const Expr *E) {
case Expr::ImaginaryLiteralClass:
case Expr::ImplicitValueInitExprClass:
case Expr::IntegerLiteralClass:
+ case Expr::ArrayInitIndexExprClass:
case Expr::NoInitExprClass:
case Expr::ObjCEncodeExprClass:
case Expr::ObjCStringLiteralClass:
diff --git a/lib/Sema/SemaExpr.cpp b/lib/Sema/SemaExpr.cpp
index 719e1e3502ca..3c554c9a5244 100644
--- a/lib/Sema/SemaExpr.cpp
+++ b/lib/Sema/SemaExpr.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
@@ -42,6 +41,7 @@
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaFixItUtils.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "llvm/Support/ConvertUTF.h"
using namespace clang;
@@ -103,13 +103,9 @@ static bool HasRedeclarationWithoutAvailabilityInCategory(const Decl *D) {
return false;
}
-static AvailabilityResult
-DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass,
- bool ObjCPropertyAccess) {
- // See if this declaration is unavailable or deprecated.
- std::string Message;
- AvailabilityResult Result = D->getAvailability(&Message);
+AvailabilityResult
+Sema::ShouldDiagnoseAvailabilityOfDecl(NamedDecl *&D, std::string *Message) {
+ AvailabilityResult Result = D->getAvailability(Message);
// For typedefs, if the typedef declaration appears available look
// to the underlying type to see if it is more restrictive.
@@ -117,18 +113,18 @@ DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc,
if (Result == AR_Available) {
if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
D = TT->getDecl();
- Result = D->getAvailability(&Message);
+ Result = D->getAvailability(Message);
continue;
}
}
break;
}
-
+
// Forward class declarations get their attributes from their definition.
if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
if (IDecl->getDefinition()) {
D = IDecl->getDefinition();
- Result = D->getAvailability(&Message);
+ Result = D->getAvailability(Message);
}
}
@@ -136,12 +132,51 @@ DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc,
if (Result == AR_Available) {
const DeclContext *DC = ECD->getDeclContext();
if (const EnumDecl *TheEnumDecl = dyn_cast<EnumDecl>(DC))
- Result = TheEnumDecl->getAvailability(&Message);
+ Result = TheEnumDecl->getAvailability(Message);
+ }
+
+ if (Result == AR_NotYetIntroduced) {
+ // Don't do this for enums; they can't be redeclared.
+ if (isa<EnumConstantDecl>(D) || isa<EnumDecl>(D))
+ return AR_Available;
+
+ bool Warn = !D->getAttr<AvailabilityAttr>()->isInherited();
+ // Objective-C method declarations in categories are not modelled as
+ // redeclarations, so manually look for a redeclaration in a category
+ // if necessary.
+ if (Warn && HasRedeclarationWithoutAvailabilityInCategory(D))
+ Warn = false;
+ // In general, D will point to the most recent redeclaration. However,
+ // for `@class A;` decls, this isn't true -- manually go through the
+ // redecl chain in that case.
+ if (Warn && isa<ObjCInterfaceDecl>(D))
+ for (Decl *Redecl = D->getMostRecentDecl(); Redecl && Warn;
+ Redecl = Redecl->getPreviousDecl())
+ if (!Redecl->hasAttr<AvailabilityAttr>() ||
+ Redecl->getAttr<AvailabilityAttr>()->isInherited())
+ Warn = false;
+
+ return Warn ? AR_NotYetIntroduced : AR_Available;
+ }
+
+ return Result;
+}
+
+static void
+DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ bool ObjCPropertyAccess) {
+ std::string Message;
+ // See if this declaration is unavailable, deprecated, or partial.
+ if (AvailabilityResult Result =
+ S.ShouldDiagnoseAvailabilityOfDecl(D, &Message)) {
+
+ if (Result == AR_NotYetIntroduced && S.getCurFunctionOrMethodDecl()) {
+ S.getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
+ return;
}
- const ObjCPropertyDecl *ObjCPDecl = nullptr;
- if (Result == AR_Deprecated || Result == AR_Unavailable ||
- Result == AR_NotYetIntroduced) {
+ const ObjCPropertyDecl *ObjCPDecl = nullptr;
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
@@ -149,56 +184,10 @@ DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc,
ObjCPDecl = PD;
}
}
- }
-
- switch (Result) {
- case AR_Available:
- break;
-
- case AR_Deprecated:
- if (S.getCurContextAvailability() != AR_Deprecated)
- S.EmitAvailabilityWarning(Sema::AD_Deprecation,
- D, Message, Loc, UnknownObjCClass, ObjCPDecl,
- ObjCPropertyAccess);
- break;
-
- case AR_NotYetIntroduced: {
- // Don't do this for enums, they can't be redeclared.
- if (isa<EnumConstantDecl>(D) || isa<EnumDecl>(D))
- break;
-
- bool Warn = !D->getAttr<AvailabilityAttr>()->isInherited();
- // Objective-C method declarations in categories are not modelled as
- // redeclarations, so manually look for a redeclaration in a category
- // if necessary.
- if (Warn && HasRedeclarationWithoutAvailabilityInCategory(D))
- Warn = false;
- // In general, D will point to the most recent redeclaration. However,
- // for `@class A;` decls, this isn't true -- manually go through the
- // redecl chain in that case.
- if (Warn && isa<ObjCInterfaceDecl>(D))
- for (Decl *Redecl = D->getMostRecentDecl(); Redecl && Warn;
- Redecl = Redecl->getPreviousDecl())
- if (!Redecl->hasAttr<AvailabilityAttr>() ||
- Redecl->getAttr<AvailabilityAttr>()->isInherited())
- Warn = false;
-
- if (Warn)
- S.EmitAvailabilityWarning(Sema::AD_Partial, D, Message, Loc,
- UnknownObjCClass, ObjCPDecl,
- ObjCPropertyAccess);
- break;
- }
- case AR_Unavailable:
- if (S.getCurContextAvailability() != AR_Unavailable)
- S.EmitAvailabilityWarning(Sema::AD_Unavailable,
- D, Message, Loc, UnknownObjCClass, ObjCPDecl,
- ObjCPropertyAccess);
- break;
-
- }
- return Result;
+ S.EmitAvailabilityWarning(Result, D, Message, Loc, UnknownObjCClass,
+ ObjCPDecl, ObjCPropertyAccess);
+ }
}
/// \brief Emit a note explaining that this function is deleted.
@@ -340,10 +329,15 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
// See if this is an auto-typed variable whose initializer we are parsing.
if (ParsingInitForAutoVars.count(D)) {
- const AutoType *AT = cast<VarDecl>(D)->getType()->getContainedAutoType();
+ if (isa<BindingDecl>(D)) {
+ Diag(Loc, diag::err_binding_cannot_appear_in_own_initializer)
+ << D->getDeclName();
+ } else {
+ const AutoType *AT = cast<VarDecl>(D)->getType()->getContainedAutoType();
- Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer)
- << D->getDeclName() << (unsigned)AT->getKeyword();
+ Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer)
+ << D->getDeclName() << (unsigned)AT->getKeyword();
+ }
return true;
}
@@ -366,6 +360,9 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
if (getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
DeduceReturnType(FD, Loc))
return true;
+
+ if (getLangOpts().CUDA && !CheckCUDACall(Loc, FD))
+ return true;
}
// [OpenMP 4.0], 2.15 declare reduction Directive, Restrictions
@@ -660,7 +657,7 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
return E;
// OpenCL usually rejects direct accesses to values of 'half' type.
- if (getLangOpts().OpenCL && !getOpenCLOptions().cl_khr_fp16 &&
+ if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp16") &&
T->isHalfType()) {
Diag(E->getExprLoc(), diag::err_opencl_half_load_store)
<< 0 << T;
@@ -820,8 +817,16 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
// double.
const BuiltinType *BTy = Ty->getAs<BuiltinType>();
if (BTy && (BTy->getKind() == BuiltinType::Half ||
- BTy->getKind() == BuiltinType::Float))
- E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get();
+ BTy->getKind() == BuiltinType::Float)) {
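+ // In OpenCL without cl_khr_fp64 there is no double type to promote to,
+ // so promote half to float and leave float arguments unchanged.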
+ if (getLangOpts().OpenCL &&
+ !getOpenCLOptions().isEnabled("cl_khr_fp64")) {
+ if (BTy->getKind() == BuiltinType::Half) {
+ E = ImpCastExprToType(E, Context.FloatTy, CK_FloatingCast).get();
+ }
+ } else {
+ E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get();
+ }
+ }
// C++ performs lvalue-to-rvalue conversion as a default argument
// promotion, even on class types, but note:
@@ -1189,7 +1194,7 @@ static bool unsupportedTypeConversion(const Sema &S, QualType LHSType,
*/
return Float128AndLongDouble &&
(&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) !=
- &llvm::APFloat::IEEEdouble);
+ &llvm::APFloat::IEEEdouble());
}
typedef ExprResult PerformCastFn(Sema &S, Expr *operand, QualType toType);
@@ -1735,19 +1740,6 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS, NamedDecl *FoundD,
const TemplateArgumentListInfo *TemplateArgs) {
- if (getLangOpts().CUDA)
- if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
- if (const FunctionDecl *Callee = dyn_cast<FunctionDecl>(D)) {
- if (CheckCUDATarget(Caller, Callee)) {
- Diag(NameInfo.getLoc(), diag::err_ref_bad_target)
- << IdentifyCUDATarget(Callee) << D->getIdentifier()
- << IdentifyCUDATarget(Caller);
- Diag(D->getLocation(), diag::note_previous_decl)
- << D->getIdentifier();
- return ExprError();
- }
- }
-
bool RefersToCapturedVariable =
isa<VarDecl>(D) &&
NeedToCaptureVariable(cast<VarDecl>(D), NameInfo.getLoc());
@@ -1785,6 +1777,12 @@ Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
E->setObjectKind(OK_BitField);
}
+ // C++ [expr.prim]/8: The expression [...] is a bit-field if the identifier
+ // designates a bit-field.
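+ // For example, in "struct S { int b : 3; }; auto [x] = S{};" the
+ // expression 'x' designates a bit-field, so its address cannot be taken.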
+ if (auto *BD = dyn_cast<BindingDecl>(D))
+ if (auto *BE = BD->getBinding())
+ E->setObjectKind(BE->getObjectKind());
+
return E;
}
@@ -2462,7 +2460,7 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) {
// Diagnose using an ivar in a class method.
if (IsClassMethod)
- return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ return ExprError(Diag(Loc, diag::err_ivar_use_in_class_method)
<< IV->getDeclName());
// If we're referencing an invalid decl, just return this as a silent
@@ -2478,7 +2476,7 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
if (IV->getAccessControl() == ObjCIvarDecl::Private &&
!declaresSameEntity(ClassDeclared, IFace) &&
!getLangOpts().DebuggerSupport)
- Diag(Loc, diag::error_private_ivar_access) << IV->getDeclName();
+ Diag(Loc, diag::err_private_ivar_access) << IV->getDeclName();
// FIXME: This should use a new expr for a direct reference, don't
// turn this into Self->ivar, just return a BareIVarExpr or something.
@@ -2534,7 +2532,7 @@ Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) {
// If accessing a stand-alone ivar in a class method, this is an error.
if (const ObjCIvarDecl *IV = dyn_cast<ObjCIvarDecl>(Lookup.getFoundDecl()))
- return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ return ExprError(Diag(Loc, diag::err_ivar_use_in_class_method)
<< IV->getDeclName());
}
@@ -2829,6 +2827,10 @@ ExprResult Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
return ULE;
}
+static void
+diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
+ ValueDecl *var, DeclContext *DC);
+
/// \brief Complete semantic analysis for a reference to the given declaration.
ExprResult Sema::BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
@@ -2881,6 +2883,14 @@ ExprResult Sema::BuildDeclarationNameExpr(
{
QualType type = VD->getType();
+ if (auto *FPT = type->getAs<FunctionProtoType>()) {
+ // C++ [except.spec]p17:
+ // An exception-specification is considered to be needed when:
+ // - in an expression, the function is the unique lookup result or
+ // the selected member of a set of overloaded functions.
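+ // For example, "void (*p)() = f;" where f's noexcept specification is
+ // still unevaluated or uninstantiated forces it to be resolved here.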
+ ResolveExceptionSpec(Loc, FPT);
+ type = VD->getType();
+ }
ExprValueKind valueKind = VK_RValue;
switch (D->getKind()) {
@@ -2939,6 +2949,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
case Decl::Var:
case Decl::VarTemplateSpecialization:
case Decl::VarTemplatePartialSpecialization:
+ case Decl::Decomposition:
case Decl::OMPCapturedExpr:
// In C, "extern void blah;" is valid and is an r-value.
if (!getLangOpts().CPlusPlus &&
@@ -2966,6 +2977,19 @@ ExprResult Sema::BuildDeclarationNameExpr(
break;
}
+
+ case Decl::Binding: {
+ // These are always lvalues.
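+ // e.g. in "auto &[x, y] = p;", a use of 'x' is an lvalue referring to
+ // the corresponding subobject of 'p'.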
+ valueKind = VK_LValue;
+ type = type.getNonReferenceType();
+ // FIXME: Support lambda-capture of BindingDecls, once CWG actually
+ // decides how that's supposed to work.
+ auto *BD = cast<BindingDecl>(VD);
+ if (BD->getDeclContext()->isFunctionOrMethod() &&
+ BD->getDeclContext() != CurContext)
+ diagnoseUncapturableValueReference(*this, Loc, BD, CurContext);
+ break;
+ }
case Decl::Function: {
if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) {
@@ -3046,8 +3070,9 @@ static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
SmallString<32> &Target) {
Target.resize(CharByteWidth * (Source.size() + 1));
char *ResultPtr = &Target[0];
- const UTF8 *ErrorPtr;
- bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
+ const llvm::UTF8 *ErrorPtr;
+ bool success =
+ llvm::ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
(void)success;
assert(success);
Target.resize(ResultPtr - &Target[0]);
@@ -3361,7 +3386,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Literal.isFloatingLiteral()) {
QualType Ty;
if (Literal.isHalf){
- if (getOpenCLOptions().cl_khr_fp16)
+ if (getOpenCLOptions().isEnabled("cl_khr_fp16"))
Ty = Context.HalfTy;
else {
Diag(Tok.getLocation(), diag::err_half_const_requires_fp16);
@@ -3380,10 +3405,13 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Ty == Context.DoubleTy) {
if (getLangOpts().SinglePrecisionConstants) {
- Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
+ const BuiltinType *BTy = Ty->getAs<BuiltinType>();
+ if (BTy->getKind() != BuiltinType::Float) {
+ Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
+ }
} else if (getLangOpts().OpenCL &&
- !((getLangOpts().OpenCLVersion >= 120) ||
- getOpenCLOptions().cl_khr_fp64)) {
+ !getOpenCLOptions().isEnabled("cl_khr_fp64")) {
+ // Impose single-precision float type when cl_khr_fp64 is not enabled.
Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64);
Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
}
@@ -3493,7 +3521,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
// To be compatible with MSVC, hex integer literals ending with the
// LL or i64 suffix are always signed in Microsoft mode.
if (!Literal.isUnsigned && (ResultVal[LongLongSize-1] == 0 ||
- (getLangOpts().MicrosoftExt && Literal.isLongLong)))
+ (getLangOpts().MSVCCompat && Literal.isLongLong)))
Ty = Context.LongLongTy;
else if (AllowUnsigned)
Ty = Context.UnsignedLongLongTy;
@@ -3852,6 +3880,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
+ case Type::ObjCTypeParam:
case Type::Pipe:
llvm_unreachable("type class is never variably-modified!");
case Type::Adjusted:
@@ -4304,14 +4333,13 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
diag::err_omp_section_incomplete_type, Base))
return ExprError();
- if (LowerBound) {
+ if (LowerBound && !OriginalTy->isAnyPointerType()) {
llvm::APSInt LowerBoundValue;
if (LowerBound->EvaluateAsInt(LowerBoundValue, Context)) {
- // OpenMP 4.0, [2.4 Array Sections]
- // The lower-bound and length must evaluate to non-negative integers.
+ // OpenMP 4.5, [2.4 Array Sections]
+ // The array section must be a subset of the original array.
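+ // A negative lower bound can never address a subset of an array, but it
+ // can be valid for a pointer-based section such as "p[-1:2]", which is
+ // why pointers are excluded from this check above.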
if (LowerBoundValue.isNegative()) {
- Diag(LowerBound->getExprLoc(), diag::err_omp_section_negative)
- << 0 << LowerBoundValue.toString(/*Radix=*/10, /*Signed=*/true)
+ Diag(LowerBound->getExprLoc(), diag::err_omp_section_not_subset_of_array)
<< LowerBound->getSourceRange();
return ExprError();
}
@@ -4321,11 +4349,11 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
if (Length) {
llvm::APSInt LengthValue;
if (Length->EvaluateAsInt(LengthValue, Context)) {
- // OpenMP 4.0, [2.4 Array Sections]
- // The lower-bound and length must evaluate to non-negative integers.
+ // OpenMP 4.5, [2.4 Array Sections]
+ // The length must evaluate to non-negative integers.
if (LengthValue.isNegative()) {
- Diag(Length->getExprLoc(), diag::err_omp_section_negative)
- << 1 << LengthValue.toString(/*Radix=*/10, /*Signed=*/true)
+ Diag(Length->getExprLoc(), diag::err_omp_section_length_negative)
+ << LengthValue.toString(/*Radix=*/10, /*Signed=*/true)
<< Length->getSourceRange();
return ExprError();
}
@@ -4333,7 +4361,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
} else if (ColonLoc.isValid() &&
(OriginalTy.isNull() || (!OriginalTy->isConstantArrayType() &&
!OriginalTy->isVariableArrayType()))) {
- // OpenMP 4.0, [2.4 Array Sections]
+ // OpenMP 4.5, [2.4 Array Sections]
// When the size of the array dimension is not known, the length must be
// specified explicitly.
Diag(ColonLoc, diag::err_omp_section_length_undefined)
@@ -4359,6 +4387,16 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *LHSExp = Base;
Expr *RHSExp = Idx;
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
+
+ // Per C++ core issue 1213, the result is an xvalue if either operand is
+ // a non-lvalue array, and an lvalue otherwise.
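+ // For example, given "using A = int[3];", the expression "A{}[0]"
+ // subscripts a prvalue array and therefore yields an xvalue.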
+ if (getLangOpts().CPlusPlus11 &&
+ ((LHSExp->getType()->isArrayType() && !LHSExp->isLValue()) ||
+ (RHSExp->getType()->isArrayType() && !RHSExp->isLValue())))
+ VK = VK_XValue;
+
// Perform default conversions.
if (!LHSExp->getType()->getAs<VectorType>()) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(LHSExp);
@@ -4372,8 +4410,6 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
RHSExp = Result.get();
QualType LHSTy = LHSExp->getType(), RHSTy = RHSExp->getType();
- ExprValueKind VK = VK_LValue;
- ExprObjectKind OK = OK_Ordinary;
// C99 6.5.2.1p2: the expression e1[e2] is by definition precisely equivalent
// to the expression *((e1)+(e2)). This means the array "Base" may actually be
@@ -4496,16 +4532,15 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
ArraySubscriptExpr(LHSExp, RHSExp, ResultType, VK, OK, RLoc);
}
-ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
- FunctionDecl *FD,
- ParmVarDecl *Param) {
+bool Sema::CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
+ ParmVarDecl *Param) {
if (Param->hasUnparsedDefaultArg()) {
Diag(CallLoc,
diag::err_use_of_default_argument_to_function_declared_later) <<
FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
Diag(UnparsedDefaultArgLocs[Param],
diag::note_default_argument_declared_here);
- return ExprError();
+ return true;
}
if (Param->hasUninstantiatedDefaultArg()) {
@@ -4521,11 +4556,11 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
InstantiatingTemplate Inst(*this, CallLoc, Param,
MutiLevelArgList.getInnermost());
if (Inst.isInvalid())
- return ExprError();
+ return true;
if (Inst.isAlreadyInstantiating()) {
Diag(Param->getLocStart(), diag::err_recursive_default_argument) << FD;
Param->setInvalidDecl();
- return ExprError();
+ return true;
}
ExprResult Result;
@@ -4536,10 +4571,11 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
// default argument expression appears.
ContextRAII SavedContext(*this, FD);
LocalInstantiationScope Local(*this);
- Result = SubstExpr(UninstExpr, MutiLevelArgList);
+ Result = SubstInitializer(UninstExpr, MutiLevelArgList,
+ /*DirectInit*/false);
}
if (Result.isInvalid())
- return ExprError();
+ return true;
// Check the expression as an initializer for the parameter.
InitializedEntity Entity
@@ -4552,12 +4588,12 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
InitializationSequence InitSeq(*this, Entity, Kind, ResultE);
Result = InitSeq.Perform(*this, Entity, Kind, ResultE);
if (Result.isInvalid())
- return ExprError();
+ return true;
Result = ActOnFinishFullExpr(Result.getAs<Expr>(),
Param->getOuterLocStart());
if (Result.isInvalid())
- return ExprError();
+ return true;
// Remember the instantiated default argument.
Param->setDefaultArg(Result.getAs<Expr>());
@@ -4570,7 +4606,7 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
if (!Param->hasInit()) {
Diag(Param->getLocStart(), diag::err_recursive_default_argument) << FD;
Param->setInvalidDecl();
- return ExprError();
+ return true;
}
// If the default expression creates temporaries, we need to
@@ -4597,9 +4633,15 @@ ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
// as being "referenced".
MarkDeclarationsReferencedInExpr(Param->getDefaultArg(),
/*SkipLocalVariables=*/true);
- return CXXDefaultArgExpr::Create(Context, CallLoc, Param);
+ return false;
}
+ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
+ FunctionDecl *FD, ParmVarDecl *Param) {
+ if (CheckCXXDefaultArgExpr(CallLoc, FD, Param))
+ return ExprError();
+ return CXXDefaultArgExpr::Create(Context, CallLoc, Param);
+}
Sema::VariadicCallType
Sema::getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto,
@@ -5057,7 +5099,11 @@ static FunctionDecl *rewriteBuiltinFunctionDecl(Sema *Sema, ASTContext &Context,
for (QualType ParamType : FT->param_types()) {
// Convert array arguments to pointer to simplify type lookup.
- Expr *Arg = Sema->DefaultFunctionArrayLvalueConversion(ArgExprs[i++]).get();
+ ExprResult ArgRes =
+ Sema->DefaultFunctionArrayLvalueConversion(ArgExprs[i++]);
+ if (ArgRes.isInvalid())
+ return nullptr;
+ Expr *Arg = ArgRes.get();
QualType ArgType = Arg->getType();
if (!ParamType->isPointerType() ||
ParamType.getQualifiers().hasAddressSpace() ||
@@ -5116,12 +5162,11 @@ static bool isNumberOfArgsValidForCall(Sema &S, const FunctionDecl *Callee,
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
-ExprResult
-Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
- MultiExprArg ArgExprs, SourceLocation RParenLoc,
- Expr *ExecConfig, bool IsExecConfig) {
+ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
+ MultiExprArg ArgExprs, SourceLocation RParenLoc,
+ Expr *ExecConfig, bool IsExecConfig) {
// Since this might be a postfix expression, get rid of ParenListExprs.
- ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Fn);
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(Scope, Fn);
if (Result.isInvalid()) return ExprError();
Fn = Result.get();
@@ -5134,9 +5179,9 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
if (!ArgExprs.empty()) {
// Pseudo-destructor calls should not have any arguments.
Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args)
- << FixItHint::CreateRemoval(
- SourceRange(ArgExprs.front()->getLocStart(),
- ArgExprs.back()->getLocEnd()));
+ << FixItHint::CreateRemoval(
+ SourceRange(ArgExprs.front()->getLocStart(),
+ ArgExprs.back()->getLocEnd()));
}
return new (Context)
@@ -5169,7 +5214,7 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
// Determine whether this is a call to an object (C++ [over.call.object]).
if (Fn->getType()->isRecordType())
- return BuildCallToObjectOfClassType(S, Fn, LParenLoc, ArgExprs,
+ return BuildCallToObjectOfClassType(Scope, Fn, LParenLoc, ArgExprs,
RParenLoc);
if (Fn->getType() == Context.UnknownAnyTy) {
@@ -5179,7 +5224,8 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
}
if (Fn->getType() == Context.BoundMemberTy) {
- return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs, RParenLoc);
+ return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs,
+ RParenLoc);
}
}
@@ -5187,15 +5233,16 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
if (Fn->getType() == Context.OverloadTy) {
OverloadExpr::FindResult find = OverloadExpr::find(Fn);
- // We aren't supposed to apply this logic for if there's an '&' involved.
+ // We aren't supposed to apply this logic if there's an '&' involved.
if (!find.HasFormOfMemberPointer) {
OverloadExpr *ovl = find.Expression;
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(ovl))
- return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, ArgExprs,
- RParenLoc, ExecConfig,
- /*AllowTypoCorrection=*/true,
- find.IsAddressOfOperand);
- return BuildCallToMemberFunction(S, Fn, LParenLoc, ArgExprs, RParenLoc);
+ return BuildOverloadedCallExpr(
+ Scope, Fn, ULE, LParenLoc, ArgExprs, RParenLoc, ExecConfig,
+ /*AllowTypoCorrection=*/true, find.IsAddressOfOperand);
+ return BuildCallToMemberFunction(Scope, Fn, LParenLoc, ArgExprs,
+ RParenLoc);
}
}
@@ -5225,12 +5272,12 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
// Rewrite the function decl for this builtin by replacing parameters
// with no explicit address space with the address space of the arguments
// in ArgExprs.
- if ((FDecl = rewriteBuiltinFunctionDecl(this, Context, FDecl, ArgExprs))) {
+ if ((FDecl =
+ rewriteBuiltinFunctionDecl(this, Context, FDecl, ArgExprs))) {
NDecl = FDecl;
- Fn = DeclRefExpr::Create(Context, FDecl->getQualifierLoc(),
- SourceLocation(), FDecl, false,
- SourceLocation(), FDecl->getType(),
- Fn->getValueKind(), FDecl);
+ Fn = DeclRefExpr::Create(
+ Context, FDecl->getQualifierLoc(), SourceLocation(), FDecl, false,
+ SourceLocation(), FDecl->getType(), Fn->getValueKind(), FDecl);
}
}
} else if (isa<MemberExpr>(NakedFn))
@@ -5242,6 +5289,9 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
Fn->getLocStart()))
return ExprError();
+ if (getLangOpts().OpenCL && checkOpenCLDisabledDecl(*FD, *Fn))
+ return ExprError();
+
// CheckEnableIf assumes that the we're passing in a sane number of args for
// FD, but that doesn't always hold true here. This is because, in some
// cases, we'll emit a diag about an ill-formed function call, but then
@@ -5252,10 +5302,10 @@ Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
isNumberOfArgsValidForCall(*this, FD, ArgExprs.size())) {
if (const EnableIfAttr *Attr = CheckEnableIf(FD, ArgExprs, true)) {
Diag(Fn->getLocStart(),
- isa<CXXMethodDecl>(FD) ?
- diag::err_ovl_no_viable_member_function_in_call :
- diag::err_ovl_no_viable_function_in_call)
- << FD << FD->getSourceRange();
+ isa<CXXMethodDecl>(FD)
+ ? diag::err_ovl_no_viable_member_function_in_call
+ : diag::err_ovl_no_viable_function_in_call)
+ << FD << FD->getSourceRange();
Diag(FD->getLocation(),
diag::note_ovl_candidate_disabled_by_enable_if_attr)
<< Attr->getCond()->getSourceRange() << Attr->getMessage();
@@ -5549,7 +5599,7 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
return ExprError();
LiteralExpr = Result.get();
- bool isFileScope = getCurFunctionOrMethodDecl() == nullptr;
+ bool isFileScope = !CurContext->isFunctionOrMethod();
if (isFileScope &&
!LiteralExpr->isTypeDependent() &&
!LiteralExpr->isValueDependent() &&
@@ -5559,11 +5609,31 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
}
// In C, compound literals are l-values for some reason.
- ExprValueKind VK = getLangOpts().CPlusPlus ? VK_RValue : VK_LValue;
+ // For GCC compatibility, in C++, file-scope array compound literals with
+ // constant initializers are also l-values, and compound literals are
+ // otherwise prvalues.
+ //
+ // (GCC also treats C++ list-initialized file-scope array prvalues with
+ // constant initializers as l-values, but that's non-conforming, so we don't
+ // follow it there.)
+ //
+ // FIXME: It would be better to handle the lvalue cases as materializing and
+ // lifetime-extending a temporary object, but our materialized temporaries
+ // representation only supports lifetime extension from a variable, not "out
+ // of thin air".
+ // FIXME: For C++, we might want to instead lifetime-extend only if a pointer
+ // is bound to the result of applying array-to-pointer decay to the compound
+ // literal.
+ // FIXME: GCC supports compound literals of reference type, which should
+ // obviously have a value kind derived from the kind of reference involved.
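+ //
+ // For example, file-scope GNU C++ code like
+ // "int (*p)[3] = &(int[3]){1, 2, 3};" relies on the array compound
+ // literal being an lvalue.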
+ ExprValueKind VK =
+ (getLangOpts().CPlusPlus && !(isFileScope && literalType->isArrayType()))
+ ? VK_RValue
+ : VK_LValue;
return MaybeBindToTemporary(
- new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
- VK, LiteralExpr, isFileScope));
+ new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
+ VK, LiteralExpr, isFileScope));
}
ExprResult
@@ -6006,7 +6076,9 @@ Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
CheckTollFreeBridgeCast(castType, CastExpr);
CheckObjCBridgeRelatedCast(castType, CastExpr);
-
+
+ DiscardMisalignedMemberAddress(castType.getTypePtr(), CastExpr);
+
return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, CastExpr);
}
@@ -7007,6 +7079,55 @@ static void DiagnoseConditionalPrecedence(Sema &Self,
SourceRange(CondRHS->getLocStart(), RHSExpr->getLocEnd()));
}
+/// Compute the nullability of a conditional expression.
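+/// For example, given "int * _Nonnull p;" and "int * _Nullable q;":
+///   c ? p : q   // merged nullability is _Nullable
+///   p ?: q      // binary form: _Nonnull, since the LHS is _Nonnull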
+static QualType computeConditionalNullability(QualType ResTy, bool IsBin,
+ QualType LHSTy, QualType RHSTy,
+ ASTContext &Ctx) {
+ if (!ResTy->isAnyPointerType())
+ return ResTy;
+
+ auto GetNullability = [&Ctx](QualType Ty) {
+ Optional<NullabilityKind> Kind = Ty->getNullability(Ctx);
+ if (Kind)
+ return *Kind;
+ return NullabilityKind::Unspecified;
+ };
+
+ auto LHSKind = GetNullability(LHSTy), RHSKind = GetNullability(RHSTy);
+ NullabilityKind MergedKind;
+
+ // Compute nullability of a binary conditional expression.
+ if (IsBin) {
+ if (LHSKind == NullabilityKind::NonNull)
+ MergedKind = NullabilityKind::NonNull;
+ else
+ MergedKind = RHSKind;
+ // Compute nullability of a normal conditional expression.
+ } else {
+ if (LHSKind == NullabilityKind::Nullable ||
+ RHSKind == NullabilityKind::Nullable)
+ MergedKind = NullabilityKind::Nullable;
+ else if (LHSKind == NullabilityKind::NonNull)
+ MergedKind = RHSKind;
+ else if (RHSKind == NullabilityKind::NonNull)
+ MergedKind = LHSKind;
+ else
+ MergedKind = NullabilityKind::Unspecified;
+ }
+
+ // Return if ResTy already has the correct nullability.
+ if (GetNullability(ResTy) == MergedKind)
+ return ResTy;
+
+ // Strip all nullability from ResTy.
+ while (ResTy->getNullability(Ctx))
+ ResTy = ResTy.getSingleStepDesugaredType(Ctx);
+
+ // Create a new AttributedType with the new nullability kind.
+ auto NewAttr = AttributedType::getNullabilityAttrKind(MergedKind);
+ return Ctx.getAttributedType(NewAttr, ResTy, ResTy);
+}
+
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of a the GNU conditional expr extension.
ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
@@ -7074,6 +7195,7 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
LHSExpr = CondExpr = opaqueValue;
}
+ QualType LHSTy = LHSExpr->getType(), RHSTy = RHSExpr->getType();
ExprValueKind VK = VK_RValue;
ExprObjectKind OK = OK_Ordinary;
ExprResult Cond = CondExpr, LHS = LHSExpr, RHS = RHSExpr;
@@ -7088,6 +7210,9 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
CheckBoolLikeConversion(Cond.get(), QuestionLoc);
+ result = computeConditionalNullability(result, commonExpr, LHSTy, RHSTy,
+ Context);
+
if (!commonExpr)
return new (Context)
ConditionalOperator(Cond.get(), QuestionLoc, LHS.get(), ColonLoc,
@@ -7218,7 +7343,7 @@ checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
return Sema::IncompatiblePointer;
}
if (!S.getLangOpts().CPlusPlus &&
- S.IsNoReturnConversion(ltrans, rtrans, ltrans))
+ S.IsFunctionConversion(ltrans, rtrans, ltrans))
return Sema::IncompatiblePointer;
return ConvTy;
}
@@ -7603,6 +7728,11 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
}
}
+ if (LHSType->isSamplerT() && RHSType->isIntegerType()) {
+ Kind = CK_IntToOCLSampler;
+ return Compatible;
+ }
+
return Incompatible;
}
@@ -7683,6 +7813,10 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
bool Diagnose,
bool DiagnoseCFAudited,
bool ConvertRHS) {
+ // We need to be able to tell the caller whether we diagnosed a problem, if
+ // they ask us to issue diagnostics.
+ assert((ConvertRHS || !Diagnose) && "can't indicate whether we diagnosed");
+
// If ConvertRHS is false, we want to leave the caller's RHS untouched. Sadly,
// we can't avoid *all* modifications at the moment, so we need some somewhere
// to put the updated value.
@@ -7694,9 +7828,9 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
// C++ 5.17p3: If the left operand is not of class type, the
// expression is implicitly converted (C++ 4) to the
// cv-unqualified type of the left operand.
- ExprResult Res;
+ QualType RHSType = RHS.get()->getType();
if (Diagnose) {
- Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ RHS = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
AA_Assigning);
} else {
ImplicitConversionSequence ICS =
@@ -7708,17 +7842,15 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
/*AllowObjCWritebackConversion=*/false);
if (ICS.isFailure())
return Incompatible;
- Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ RHS = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
ICS, AA_Assigning);
}
- if (Res.isInvalid())
+ if (RHS.isInvalid())
return Incompatible;
Sema::AssignConvertType result = Compatible;
if (getLangOpts().ObjCAutoRefCount &&
- !CheckObjCARCUnavailableWeakConversion(LHSType,
- RHS.get()->getType()))
+ !CheckObjCARCUnavailableWeakConversion(LHSType, RHSType))
result = IncompatibleObjCWeakRef;
- RHS = Res;
return result;
}
@@ -7942,6 +8074,7 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
// If there's an ext-vector type and a scalar, try to convert the scalar to
// the vector element type and splat.
+ // FIXME: this should also work for regular vector types as supported in GCC.
if (!RHSVecType && isa<ExtVectorType>(LHSVecType)) {
if (!tryVectorConvertAndSplat(*this, &RHS, RHSType,
LHSVecType->getElementType(), LHSType))
@@ -7954,16 +8087,31 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
return RHSType;
}
- // If we're allowing lax vector conversions, only the total (data) size needs
- // to be the same. If one of the types is scalar, the result is always the
- // vector type. Don't allow this if the scalar operand is an lvalue.
+ // FIXME: The code below also handles conversion between vectors and
+ // non-scalars; we should break this down into fine-grained specific checks
+ // and emit proper diagnostics.
QualType VecType = LHSVecType ? LHSType : RHSType;
- QualType ScalarType = LHSVecType ? RHSType : LHSType;
- ExprResult *ScalarExpr = LHSVecType ? &RHS : &LHS;
- if (isLaxVectorConversion(ScalarType, VecType) &&
- !ScalarExpr->get()->isLValue()) {
- *ScalarExpr = ImpCastExprToType(ScalarExpr->get(), VecType, CK_BitCast);
- return VecType;
+ const VectorType *VT = LHSVecType ? LHSVecType : RHSVecType;
+ QualType OtherType = LHSVecType ? RHSType : LHSType;
+ ExprResult *OtherExpr = LHSVecType ? &RHS : &LHS;
+ if (isLaxVectorConversion(OtherType, VecType)) {
+ // If we're allowing lax vector conversions, only the total (data) size
+ // needs to be the same. For non-compound assignment, if one of the types is
+ // scalar, the result is always the vector type.
+ if (!IsCompAssign) {
+ *OtherExpr = ImpCastExprToType(OtherExpr->get(), VecType, CK_BitCast);
+ return VecType;
+ // In a compound assignment, lhs += rhs, 'lhs' is an lvalue source,
+ // forbidding any implicit cast; here 'rhs' should be implicitly cast to
+ // the type of 'lhs'. Note that this is already done by non-compound
+ // assignments in CheckAssignmentConstraints. If 'rhs' is a scalar type,
+ // only bitcast for <1 x T> -> T. The result is also a vector type.
+ } else if (OtherType->isExtVectorType() ||
+ (OtherType->isScalarType() && VT->getNumElements() == 1)) {
+ ExprResult *RHSExpr = &RHS;
+ *RHSExpr = ImpCastExprToType(RHSExpr->get(), LHSType, CK_BitCast);
+ return VecType;
+ }
}
// Okay, the expression is invalid.
@@ -8608,13 +8756,13 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
<< RHS.get()->getSourceRange();
}
-/// \brief Return the resulting type when an OpenCL vector is shifted
+/// \brief Return the resulting type when a vector is shifted
/// by a scalar or vector shift amount.
-static QualType checkOpenCLVectorShift(Sema &S,
- ExprResult &LHS, ExprResult &RHS,
- SourceLocation Loc, bool IsCompAssign) {
+static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompAssign) {
// OpenCL v1.1 s6.3.j says RHS can be a vector only if LHS is a vector.
- if (!LHS.get()->getType()->isVectorType()) {
+ if ((S.LangOpts.OpenCL || S.LangOpts.ZVector) &&
+ !LHS.get()->getType()->isVectorType()) {
S.Diag(Loc, diag::err_shift_rhs_only_vector)
<< RHS.get()->getType() << LHS.get()->getType()
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
@@ -8630,15 +8778,17 @@ static QualType checkOpenCLVectorShift(Sema &S,
if (RHS.isInvalid()) return QualType();
QualType LHSType = LHS.get()->getType();
- const VectorType *LHSVecTy = LHSType->castAs<VectorType>();
- QualType LHSEleType = LHSVecTy->getElementType();
+ // Note that LHS might be a scalar because the routine calls not only in
+ // OpenCL case.
+ const VectorType *LHSVecTy = LHSType->getAs<VectorType>();
+ QualType LHSEleType = LHSVecTy ? LHSVecTy->getElementType() : LHSType;
// Note that RHS might not be a vector.
QualType RHSType = RHS.get()->getType();
const VectorType *RHSVecTy = RHSType->getAs<VectorType>();
QualType RHSEleType = RHSVecTy ? RHSVecTy->getElementType() : RHSType;
- // OpenCL v1.1 s6.3.j says that the operands need to be integers.
+ // The operands need to be integers.
if (!LHSEleType->isIntegerType()) {
S.Diag(Loc, diag::err_typecheck_expect_int)
<< LHS.get()->getType() << LHS.get()->getSourceRange();
@@ -8651,7 +8801,19 @@ static QualType checkOpenCLVectorShift(Sema &S,
return QualType();
}
- if (RHSVecTy) {
+ if (!LHSVecTy) {
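+ // A scalar LHS shifted by a vector RHS: convert the LHS to the RHS
+ // element type and splat it, so "i << v" is computed element-wise.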
+ assert(RHSVecTy);
+ if (IsCompAssign)
+ return RHSType;
+ if (LHSEleType != RHSEleType) {
+ LHS = S.ImpCastExprToType(LHS.get(), RHSEleType, CK_IntegralCast);
+ LHSEleType = RHSEleType;
+ }
+ QualType VecTy =
+ S.Context.getExtVectorType(LHSEleType, RHSVecTy->getNumElements());
+ LHS = S.ImpCastExprToType(LHS.get(), VecTy, CK_VectorSplat);
+ LHSType = VecTy;
+ } else if (RHSVecTy) {
// OpenCL v1.1 s6.3.j says that for vector types, the operators
// are applied component-wise. So if RHS is a vector, then ensure
// that the number of elements is the same as LHS...
@@ -8661,6 +8823,16 @@ static QualType checkOpenCLVectorShift(Sema &S,
<< LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
return QualType();
}
+ if (!S.LangOpts.OpenCL && !S.LangOpts.ZVector) {
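+ // For the GCC vector extension, warn when the element sizes differ,
+ // e.g. shifting a vector of int by a vector of long long.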
+ const BuiltinType *LHSBT = LHSEleType->getAs<clang::BuiltinType>();
+ const BuiltinType *RHSBT = RHSEleType->getAs<clang::BuiltinType>();
+ if (LHSBT != RHSBT &&
+ S.Context.getTypeSize(LHSBT) != S.Context.getTypeSize(RHSBT)) {
+ S.Diag(Loc, diag::warn_typecheck_vector_element_sizes_not_equal)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ }
+ }
} else {
// ...else expand RHS to match the number of elements in LHS.
QualType VecTy =
@@ -8680,11 +8852,9 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
// Vector shifts promote their scalar inputs to vector type.
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- if (LangOpts.OpenCL)
- return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
if (LangOpts.ZVector) {
// The shift operators for the z vector extensions work basically
- // like OpenCL shifts, except that neither the LHS nor the RHS is
+ // like general shifts, except that neither the LHS nor the RHS is
// allowed to be a "vector bool".
if (auto LHSVecType = LHS.get()->getType()->getAs<VectorType>())
if (LHSVecType->getVectorKind() == VectorType::AltiVecBool)
@@ -8692,11 +8862,8 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
if (auto RHSVecType = RHS.get()->getType()->getAs<VectorType>())
if (RHSVecType->getVectorKind() == VectorType::AltiVecBool)
return InvalidOperands(Loc, LHS, RHS);
- return checkOpenCLVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
}
- return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
+ return checkVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
}
// Shifts don't perform usual arithmetic conversions, they just do integer
@@ -8795,35 +8962,21 @@ static bool convertPointersToCompositeType(Sema &S, SourceLocation Loc,
// C++ [expr.eq]p1 uses the same notion for (in)equality
// comparisons of pointers.
- // C++ [expr.eq]p2:
- // In addition, pointers to members can be compared, or a pointer to
- // member and a null pointer constant. Pointer to member conversions
- // (4.11) and qualification conversions (4.4) are performed to bring
- // them to a common type. If one operand is a null pointer constant,
- // the common type is the type of the other operand. Otherwise, the
- // common type is a pointer to member type similar (4.4) to the type
- // of one of the operands, with a cv-qualification signature (4.4)
- // that is the union of the cv-qualification signatures of the operand
- // types.
-
QualType LHSType = LHS.get()->getType();
QualType RHSType = RHS.get()->getType();
- assert((LHSType->isPointerType() && RHSType->isPointerType()) ||
- (LHSType->isMemberPointerType() && RHSType->isMemberPointerType()));
+ assert(LHSType->isPointerType() || RHSType->isPointerType() ||
+ LHSType->isMemberPointerType() || RHSType->isMemberPointerType());
- bool NonStandardCompositeType = false;
- bool *BoolPtr = S.isSFINAEContext() ? nullptr : &NonStandardCompositeType;
- QualType T = S.FindCompositePointerType(Loc, LHS, RHS, BoolPtr);
+ QualType T = S.FindCompositePointerType(Loc, LHS, RHS);
if (T.isNull()) {
- diagnoseDistinctPointerComparison(S, Loc, LHS, RHS, /*isError*/true);
+ if ((LHSType->isPointerType() || LHSType->isMemberPointerType()) &&
+ (RHSType->isPointerType() || RHSType->isMemberPointerType()))
+ diagnoseDistinctPointerComparison(S, Loc, LHS, RHS, /*isError*/true);
+ else
+ S.InvalidOperands(Loc, LHS, RHS);
return true;
}
- if (NonStandardCompositeType)
- S.Diag(Loc, diag::ext_typecheck_comparison_of_distinct_pointers_nonstandard)
- << LHSType << RHSType << T << LHS.get()->getSourceRange()
- << RHS.get()->getSourceRange();
-
LHS = S.ImpCastExprToType(LHS.get(), T, CK_BitCast);
RHS = S.ImpCastExprToType(RHS.get(), T, CK_BitCast);
return false;
@@ -8989,10 +9142,10 @@ static void diagnoseObjCLiteralComparison(Sema &S, SourceLocation Loc,
}
}
-static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS,
- ExprResult &RHS,
- SourceLocation Loc,
- BinaryOperatorKind Opc) {
+/// Warns on !x < y, !x & y where !(x < y), !(x & y) was probably intended.
+static void diagnoseLogicalNotOnLHSofCheck(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, SourceLocation Loc,
+ BinaryOperatorKind Opc) {
// Check that left hand side is !something.
UnaryOperator *UO = dyn_cast<UnaryOperator>(LHS.get()->IgnoreImpCasts());
if (!UO || UO->getOpcode() != UO_LNot) return;
@@ -9005,8 +9158,9 @@ static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS,
if (SubExpr->isKnownToHaveBooleanValue()) return;
// Emit warning.
- S.Diag(UO->getOperatorLoc(), diag::warn_logical_not_on_lhs_of_comparison)
- << Loc;
+ bool IsBitwiseOp = Opc == BO_And || Opc == BO_Or || Opc == BO_Xor;
+ S.Diag(UO->getOperatorLoc(), diag::warn_logical_not_on_lhs_of_check)
+ << Loc << IsBitwiseOp;
// First note suggest !(x < y)
SourceLocation FirstOpen = SubExpr->getLocStart();
@@ -9015,6 +9169,7 @@ static void diagnoseLogicalNotOnLHSofComparison(Sema &S, ExprResult &LHS,
if (FirstClose.isInvalid())
FirstOpen = SourceLocation();
S.Diag(UO->getOperatorLoc(), diag::note_logical_not_fix)
+ << IsBitwiseOp
<< FixItHint::CreateInsertion(FirstOpen, "(")
<< FixItHint::CreateInsertion(FirstClose, ")");
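The renamed helper now fires for bitwise operators as well as comparisons. A short sketch of the two patterns it flags, assuming -Wlogical-not-parentheses is enabled:

bool cmp(int x, int y) { return !x < y; }   // warning: did you mean !(x < y)?
int  bit(int x, int y) { return !x & y; }   // warning: did you mean !(x & y)?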
@@ -9063,7 +9218,7 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
Expr *RHSStripped = RHS.get()->IgnoreParenImpCasts();
checkEnumComparison(*this, Loc, LHS.get(), RHS.get());
- diagnoseLogicalNotOnLHSofComparison(*this, LHS, RHS, Loc, Opc);
+ diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc);
if (!LHSType->hasFloatingRepresentation() &&
!(LHSType->isBlockPointerType() && IsRelational) &&
@@ -9180,41 +9335,53 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
LHS.get()->getSourceRange());
}
- // All of the following pointer-related warnings are GCC extensions, except
- // when handling null pointer constants.
- if (LHSType->isPointerType() && RHSType->isPointerType()) { // C99 6.5.8p2
- QualType LCanPointeeTy =
- LHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
- QualType RCanPointeeTy =
- RHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
-
- if (getLangOpts().CPlusPlus) {
- if (LCanPointeeTy == RCanPointeeTy)
- return ResultTy;
- if (!IsRelational &&
- (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
- // Valid unless comparison between non-null pointer and function pointer
- // This is a gcc extension compatibility comparison.
- // In a SFINAE context, we treat this as a hard error to maintain
- // conformance with the C++ standard.
- if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType())
- && !LHSIsNull && !RHSIsNull) {
- diagnoseFunctionPointerToVoidComparison(
- *this, Loc, LHS, RHS, /*isError*/ (bool)isSFINAEContext());
-
- if (isSFINAEContext())
- return QualType();
-
- RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
- return ResultTy;
- }
- }
+ if ((LHSType->isIntegerType() && !LHSIsNull) ||
+ (RHSType->isIntegerType() && !RHSIsNull)) {
+ // Skip normal pointer conversion checks in this case; we have better
+ // diagnostics for this below.
+ } else if (getLangOpts().CPlusPlus) {
+ // Equality comparison of a function pointer to a void pointer is invalid,
+ // but we allow it as an extension.
+ // FIXME: If we really want to allow this, should it be part of composite
+ // pointer type computation so it works in conditionals too?
+ if (!IsRelational &&
+ ((LHSType->isFunctionPointerType() && RHSType->isVoidPointerType()) ||
+ (RHSType->isFunctionPointerType() && LHSType->isVoidPointerType()))) {
+ // This is a gcc extension compatibility comparison.
+ // In a SFINAE context, we treat this as a hard error to maintain
+ // conformance with the C++ standard.
+ diagnoseFunctionPointerToVoidComparison(
+ *this, Loc, LHS, RHS, /*isError*/ (bool)isSFINAEContext());
+
+ if (isSFINAEContext())
+ return QualType();
+
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_BitCast);
+ return ResultTy;
+ }
+ // C++ [expr.eq]p2:
+ // If at least one operand is a pointer [...] bring them to their
+ // composite pointer type.
+ // C++ [expr.rel]p2:
+ // If both operands are pointers, [...] bring them to their composite
+ // pointer type.
+ if ((int)LHSType->isPointerType() + (int)RHSType->isPointerType() >=
+ (IsRelational ? 2 : 1)) {
if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
return QualType();
else
return ResultTy;
}
+ } else if (LHSType->isPointerType() &&
+ RHSType->isPointerType()) { // C99 6.5.8p2
+ // All of the following pointer-related warnings are GCC extensions, except
+ // when handling null pointer constants.
+ QualType LCanPointeeTy =
+ LHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
+ QualType RCanPointeeTy =
+ RHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
+
// C99 6.5.9p2 and C99 6.5.8p2
if (Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(),
RCanPointeeTy.getUnqualifiedType())) {
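Under the reworked C++ path above, every equality comparison with at least one pointer operand (and every relational comparison with two) is funneled through the composite-pointer-type machinery instead of ad-hoc pointee checks. An illustrative sketch of comparisons that now share one code path:

struct B {}; struct D : B {};
bool eq(D *d, B *b)           { return d == b; }  // composite type is B *
bool mp(int B::*l, int D::*r) { return l == r; }  // composite type is int D::*
bool fv(void (*f)(), void *v) { return f == v; }  // still only a GNU extension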
@@ -9259,36 +9426,63 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
}
if (getLangOpts().CPlusPlus) {
- // Comparison of nullptr_t with itself.
- if (LHSType->isNullPtrType() && RHSType->isNullPtrType())
- return ResultTy;
-
- // Comparison of pointers with null pointer constants and equality
- // comparisons of member pointers to null pointer constants.
- if (RHSIsNull &&
- ((LHSType->isAnyPointerType() || LHSType->isNullPtrType()) ||
- (!IsRelational &&
- (LHSType->isMemberPointerType() || LHSType->isBlockPointerType())))) {
- RHS = ImpCastExprToType(RHS.get(), LHSType,
- LHSType->isMemberPointerType()
- ? CK_NullToMemberPointer
- : CK_NullToPointer);
+ // C++ [expr.eq]p4:
+ // Two operands of type std::nullptr_t or one operand of type
+ // std::nullptr_t and the other a null pointer constant compare equal.
+ if (!IsRelational && LHSIsNull && RHSIsNull) {
+ if (LHSType->isNullPtrType()) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+ if (RHSType->isNullPtrType()) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+ }
+
+ // Comparison of Objective-C pointers and block pointers against nullptr_t.
+ // These aren't covered by the composite pointer type rules.
+ if (!IsRelational && RHSType->isNullPtrType() &&
+ (LHSType->isObjCObjectPointerType() || LHSType->isBlockPointerType())) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
return ResultTy;
}
- if (LHSIsNull &&
- ((RHSType->isAnyPointerType() || RHSType->isNullPtrType()) ||
- (!IsRelational &&
- (RHSType->isMemberPointerType() || RHSType->isBlockPointerType())))) {
- LHS = ImpCastExprToType(LHS.get(), RHSType,
- RHSType->isMemberPointerType()
- ? CK_NullToMemberPointer
- : CK_NullToPointer);
+ if (!IsRelational && LHSType->isNullPtrType() &&
+ (RHSType->isObjCObjectPointerType() || RHSType->isBlockPointerType())) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
return ResultTy;
}
- // Comparison of member pointers.
+ if (IsRelational &&
+ ((LHSType->isNullPtrType() && RHSType->isPointerType()) ||
+ (RHSType->isNullPtrType() && LHSType->isPointerType()))) {
+ // HACK: Relational comparison of nullptr_t against a pointer type is
+ // invalid per DR583, but we allow it within std::less<> and friends,
+ // since otherwise common uses of it break.
+ // FIXME: Consider removing this hack once LWG fixes std::less<> and
+ // friends to have std::nullptr_t overload candidates.
+ DeclContext *DC = CurContext;
+ if (isa<FunctionDecl>(DC))
+ DC = DC->getParent();
+ if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ if (CTSD->isInStdNamespace() &&
+ llvm::StringSwitch<bool>(CTSD->getName())
+ .Cases("less", "less_equal", "greater", "greater_equal", true)
+ .Default(false)) {
+ if (RHSType->isNullPtrType())
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
+ else
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+ }
+ }
+
+ // C++ [expr.eq]p2:
+ // If at least one operand is a pointer to member, [...] bring them to
+ // their composite pointer type.
if (!IsRelational &&
- LHSType->isMemberPointerType() && RHSType->isMemberPointerType()) {
+ (LHSType->isMemberPointerType() || RHSType->isMemberPointerType())) {
if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
return QualType();
else
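A sketch of the nullptr_t rules encoded above: equality against a null pointer constant is fine per [expr.eq]p4, ordering a pointer against nullptr_t is ill-formed per DR583, and the std::less<> carve-out keeps transparent comparators in namespace std working:

#include <functional>

bool eq(int *p)      { return p == nullptr; }               // OK
// bool lt(int *p)   { return p < nullptr; }                // error per DR583
bool ordered(int *p) { return std::less<>()(p, nullptr); }  // allowed by the hack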
@@ -9397,15 +9591,19 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
// Under a debugger, allow the comparison of pointers to integers,
// since users tend to want to compare addresses.
} else if ((LHSIsNull && LHSType->isIntegerType()) ||
- (RHSIsNull && RHSType->isIntegerType())) {
- if (IsRelational && !getLangOpts().CPlusPlus)
- DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
- } else if (IsRelational && !getLangOpts().CPlusPlus)
- DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer;
- else if (getLangOpts().CPlusPlus) {
+ (RHSIsNull && RHSType->isIntegerType())) {
+ if (IsRelational) {
+ isError = getLangOpts().CPlusPlus;
+ DiagID =
+ isError ? diag::err_typecheck_ordered_comparison_of_pointer_and_zero
+ : diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
+ }
+ } else if (getLangOpts().CPlusPlus) {
DiagID = diag::err_typecheck_comparison_of_pointer_integer;
isError = true;
- } else
+ } else if (IsRelational)
+ DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer;
+ else
DiagID = diag::ext_typecheck_comparison_of_pointer_integer;
if (DiagID) {
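With the reshuffled diagnostics above, an ordered comparison of a pointer against the literal zero stays an extension warning in C but becomes a hard error in C++:

bool f(int *p) { return p > 0; }   // C: extension warning; C++: now an error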
@@ -9437,6 +9635,18 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
return ResultTy;
}
+ if (getLangOpts().OpenCLVersion >= 200) {
+ if (LHSIsNull && RHSType->isQueueT()) {
+ LHS = ImpCastExprToType(LHS.get(), RHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+
+ if (LHSType->isQueueT() && RHSIsNull) {
+ RHS = ImpCastExprToType(RHS.get(), LHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+ }
+
return InvalidOperands(Loc, LHS, RHS);
}
@@ -9526,10 +9736,14 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
return GetSignedVectorType(LHS.get()->getType());
}
-inline QualType Sema::CheckBitwiseOperands(
- ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) {
+inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+ bool IsCompAssign =
+ Opc == BO_AndAssign || Opc == BO_OrAssign || Opc == BO_XorAssign;
+
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
if (LHS.get()->getType()->hasIntegerRepresentation() &&
@@ -9540,6 +9754,9 @@ inline QualType Sema::CheckBitwiseOperands(
return InvalidOperands(Loc, LHS, RHS);
}
+ if (Opc == BO_And)
+ diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc);
+
ExprResult LHSResult = LHS, RHSResult = RHS;
QualType compType = UsualArithmeticConversions(LHSResult, RHSResult,
IsCompAssign);
@@ -9647,8 +9864,8 @@ static bool IsReadonlyMessage(Expr *E, Sema &S) {
const MemberExpr *ME = dyn_cast<MemberExpr>(E);
if (!ME) return false;
if (!isa<FieldDecl>(ME->getMemberDecl())) return false;
- ObjCMessageExpr *Base =
- dyn_cast<ObjCMessageExpr>(ME->getBase()->IgnoreParenImpCasts());
+ ObjCMessageExpr *Base = dyn_cast<ObjCMessageExpr>(
+ ME->getBase()->IgnoreImplicit()->IgnoreParenImpCasts());
if (!Base) return false;
return Base->getMethodDecl() != nullptr;
}
@@ -9722,17 +9939,16 @@ static void DiagnoseConstAssignment(Sema &S, const Expr *E,
// a note to the error.
bool DiagnosticEmitted = false;
- // Track if the current expression is the result of a derefence, and if the
- // next checked expression is the result of a derefence.
+ // Track if the current expression is the result of a dereference, and if the
+ // next checked expression is the result of a dereference.
bool IsDereference = false;
bool NextIsDereference = false;
// Loop to process MemberExpr chains.
while (true) {
IsDereference = NextIsDereference;
- NextIsDereference = false;
- E = E->IgnoreParenImpCasts();
+ E = E->IgnoreImplicit()->IgnoreParenImpCasts();
if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
NextIsDereference = ME->isArrow();
const ValueDecl *VD = ME->getMemberDecl();
@@ -9930,10 +10146,10 @@ static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
case Expr::MLV_NoSetterProperty:
llvm_unreachable("readonly properties should be processed differently");
case Expr::MLV_InvalidMessageExpression:
- DiagID = diag::error_readonly_message_assignment;
+ DiagID = diag::err_readonly_message_assignment;
break;
case Expr::MLV_SubObjCPropertySetting:
- DiagID = diag::error_no_subobject_property_setting;
+ DiagID = diag::err_no_subobject_property_setting;
break;
}
@@ -9982,6 +10198,16 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
QualType LHSType = LHSExpr->getType();
QualType RHSType = CompoundType.isNull() ? RHS.get()->getType() :
CompoundType;
+ // OpenCL v1.2 s6.1.1.1 p2:
+ // The half data type can only be used to declare a pointer to a buffer that
+ // contains half values
+ if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp16") &&
+ LHSType->isHalfType()) {
+ Diag(Loc, diag::err_opencl_half_load_store) << 1
+ << LHSType.getUnqualifiedType();
+ return QualType();
+ }
+
AssignConvertType ConvTy;
if (CompoundType.isNull()) {
Expr *RHSCheck = RHS.get();
@@ -10519,7 +10745,8 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
return MPTy;
}
}
- } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl))
+ } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl) &&
+ !isa<BindingDecl>(dcl))
llvm_unreachable("Unknown/unexpected decl type");
}
@@ -10539,6 +10766,8 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
if (op->getType()->isObjCObjectType())
return Context.getObjCObjectPointerType(op->getType());
+ CheckAddressOfPackedMember(op);
+
return Context.getPointerType(op->getType());
}
@@ -10895,7 +11124,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
checkObjCPointerIntrospection(*this, LHS, RHS, OpLoc);
case BO_Xor:
case BO_Or:
- ResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc);
+ ResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc);
break;
case BO_LAnd:
case BO_LOr:
@@ -10936,7 +11165,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
case BO_OrAssign: // fallthrough
DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
case BO_XorAssign:
- CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, true);
+ CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, Opc);
CompLHSTy = CompResultTy;
if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
@@ -12428,10 +12657,14 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
MayHaveConvFixit = true;
break;
case IncompatiblePointer:
- DiagKind =
- (Action == AA_Passing_CFAudited ?
- diag::err_arc_typecheck_convert_incompatible_pointer :
- diag::ext_typecheck_convert_incompatible_pointer);
+ if (Action == AA_Passing_CFAudited)
+ DiagKind = diag::err_arc_typecheck_convert_incompatible_pointer;
+ else if (SrcType->isFunctionPointerType() &&
+ DstType->isFunctionPointerType())
+ DiagKind = diag::ext_typecheck_convert_incompatible_function_pointer;
+ else
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer;
+
CheckInferredResultType = DstType->isObjCObjectPointerType() &&
SrcType->isObjCObjectPointerType();
if (Hint.isNull() && !CheckInferredResultType) {
@@ -12582,7 +12815,7 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
Diag(Loc, FDiag);
if (DiagKind == diag::warn_incompatible_qualified_id &&
PDecl && IFace && !IFace->hasDefinition())
- Diag(IFace->getLocation(), diag::not_incomplete_class_and_qualified_id)
+ Diag(IFace->getLocation(), diag::note_incomplete_class_and_qualified_id)
<< IFace->getName() << PDecl->getName();
if (SecondType == Context.OverloadTy)
@@ -13005,6 +13238,19 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
Func->getMemberSpecializationInfo()))
checkSpecializationVisibility(Loc, Func);
+ // C++14 [except.spec]p17:
+ // An exception-specification is considered to be needed when:
+ // - the function is odr-used or, if it appears in an unevaluated operand,
+ // would be odr-used if the expression were potentially-evaluated;
+ //
+ // Note, we do this even if MightBeOdrUse is false. That indicates that the
+ // function is a pure virtual function we're calling, and in that case the
+ // function was selected by overload resolution and we need to resolve its
+ // exception specification for a different reason.
+ const FunctionProtoType *FPT = Func->getType()->getAs<FunctionProtoType>();
+ if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
+ ResolveExceptionSpec(Loc, FPT);
+
// If we don't need to mark the function as used, and we don't need to
// try to provide a definition, there's nothing more to do.
if ((Func->isUsed(/*CheckUsedAttr=*/false) || !OdrUse) &&
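Hoisting the resolution above the early-outs means an exception specification is computed even when the function is only named in an unevaluated operand, as C++14 [except.spec]p17 requires. A minimal sketch:

struct A { A() = default; };        // exception spec deferred until needed
static_assert(noexcept(A()), "");   // A() is unevaluated, yet computing
                                    // noexcept(...) needs the resolved spec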
@@ -13063,12 +13309,6 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
// FIXME: Is this really right?
if (CurContext == Func) return;
- // Resolve the exception specification for any function which is
- // used: CodeGen will need it.
- const FunctionProtoType *FPT = Func->getType()->getAs<FunctionProtoType>();
- if (FPT && isUnresolvedExceptionSpec(FPT->getExceptionSpecType()))
- ResolveExceptionSpec(Loc, FPT);
-
// Implicit instantiation of function templates and member functions of
// class templates.
if (Func->isImplicitlyInstantiable()) {
@@ -13137,7 +13377,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
static void
diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
- VarDecl *var, DeclContext *DC) {
+ ValueDecl *var, DeclContext *DC) {
DeclContext *VarDC = var->getDeclContext();
// If the parameter still belongs to the translation unit, then
@@ -13157,25 +13397,21 @@ diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
if (!S.getLangOpts().CPlusPlus && !S.CurContext->isFunctionOrMethod())
return;
+ unsigned ValueKind = isa<BindingDecl>(var) ? 1 : 0;
+ unsigned ContextKind = 3; // unknown
if (isa<CXXMethodDecl>(VarDC) &&
cast<CXXRecordDecl>(VarDC->getParent())->isLambda()) {
- S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_lambda)
- << var->getIdentifier();
- } else if (FunctionDecl *fn = dyn_cast<FunctionDecl>(VarDC)) {
- S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_function)
- << var->getIdentifier() << fn->getDeclName();
+ ContextKind = 2;
+ } else if (isa<FunctionDecl>(VarDC)) {
+ ContextKind = 0;
} else if (isa<BlockDecl>(VarDC)) {
- S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_block)
- << var->getIdentifier();
- } else {
- // FIXME: Is there any other context where a local variable can be
- // declared?
- S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_context)
- << var->getIdentifier();
+ ContextKind = 1;
}
+ S.Diag(loc, diag::err_reference_to_local_in_enclosing_context)
+ << var << ValueKind << ContextKind << VarDC;
S.Diag(var->getLocation(), diag::note_entity_declared_at)
- << var->getIdentifier();
+ << var;
// FIXME: Add additional diagnostic info about class etc. which prevents
// capture.
@@ -13319,6 +13555,23 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
}
return false;
}
+
+ // Warn about implicitly autoreleasing indirect parameters captured by blocks.
+ if (auto *PT = dyn_cast<PointerType>(CaptureType)) {
+ QualType PointeeTy = PT->getPointeeType();
+ if (isa<ObjCObjectPointerType>(PointeeTy.getCanonicalType()) &&
+ PointeeTy.getObjCLifetime() == Qualifiers::OCL_Autoreleasing &&
+ !isa<AttributedType>(PointeeTy)) {
+ if (BuildAndDiagnose) {
+ SourceLocation VarLoc = Var->getLocation();
+ S.Diag(Loc, diag::warn_block_capture_autoreleasing);
+ S.Diag(VarLoc, diag::note_declare_parameter_autoreleasing) <<
+ FixItHint::CreateInsertion(VarLoc, "__autoreleasing");
+ S.Diag(VarLoc, diag::note_declare_parameter_strong);
+ }
+ }
+ }
+
const bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
if (HasBlocksAttr || CaptureType->isReferenceType() ||
(S.getLangOpts().OpenMP && S.IsOpenMPCapturedDecl(Var))) {
@@ -13539,7 +13792,7 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
// C++ [expr.prim.lambda]p5:
// The closure type for a lambda-expression has a public inline
// function call operator [...]. This function call operator is
- // declared const (9.3.1) if and only if the lambda-expression’s
+ // declared const (9.3.1) if and only if the lambda-expression's
// parameter-declaration-clause is not followed by mutable.
DeclRefType = CaptureType.getNonReferenceType();
if (!LSI->Mutable && !CaptureType->isReferenceType())
@@ -14580,6 +14833,13 @@ namespace {
<< E->getSourceRange();
return ExprError();
}
+
+ if (isa<CallExpr>(E->getSubExpr())) {
+ S.Diag(E->getOperatorLoc(), diag::err_unknown_any_addrof_call)
+ << E->getSourceRange();
+ return ExprError();
+ }
+
assert(E->getValueKind() == VK_RValue);
assert(E->getObjectKind() == OK_Ordinary);
E->setType(DestType);
@@ -15104,11 +15364,6 @@ ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
VersionTuple Version;
if (Spec != AvailSpecs.end())
Version = Spec->getVersion();
- else
- // This is the '*' case in @available. We should diagnose this; the
- // programmer should explicitly account for this case if they target this
- // platform.
- Diag(AtLoc, diag::warn_available_using_star_case) << RParen << Platform;
return new (Context)
ObjCAvailabilityCheckExpr(Version, AtLoc, RParen, Context.BoolTy);
diff --git a/lib/Sema/SemaExprCXX.cpp b/lib/Sema/SemaExprCXX.cpp
index dfdd36752bf6..5f769cc40ded 100644
--- a/lib/Sema/SemaExprCXX.cpp
+++ b/lib/Sema/SemaExprCXX.cpp
@@ -292,7 +292,7 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
if (isDependent) {
// We didn't find our type, but that's okay: it's dependent
// anyway.
-
+
// FIXME: What if we have no nested-name-specifier?
QualType T = CheckTypenameType(ETK_None, SourceLocation(),
SS.getWithLocInContext(Context),
@@ -326,14 +326,14 @@ ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
ParsedType Sema::getDestructorType(const DeclSpec& DS, ParsedType ObjectType) {
if (DS.getTypeSpecType() == DeclSpec::TST_error || !ObjectType)
return nullptr;
- assert(DS.getTypeSpecType() == DeclSpec::TST_decltype
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype
&& "only get destructor types from declspecs");
QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
QualType SearchType = GetTypeFromParser(ObjectType);
if (SearchType->isDependentType() || Context.hasSameUnqualifiedType(SearchType, T)) {
return ParsedType::make(T);
}
-
+
Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch)
<< T << SearchType;
return nullptr;
@@ -520,17 +520,17 @@ getUuidAttrOfType(Sema &SemaRef, QualType QT,
else if (QT->isArrayType())
Ty = Ty->getBaseElementTypeUnsafe();
- const auto *RD = Ty->getAsCXXRecordDecl();
- if (!RD)
+ const auto *TD = Ty->getAsTagDecl();
+ if (!TD)
return;
- if (const auto *Uuid = RD->getMostRecentDecl()->getAttr<UuidAttr>()) {
+ if (const auto *Uuid = TD->getMostRecentDecl()->getAttr<UuidAttr>()) {
UuidAttrs.insert(Uuid);
return;
}
// __uuidof can grab UUIDs from template arguments.
- if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ if (const auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(TD)) {
const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
for (const TemplateArgument &TA : TAL.asArray()) {
const UuidAttr *UuidForTA = nullptr;
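Switching the lookup from getAsCXXRecordDecl() to getAsTagDecl() lets __uuidof find uuid attributes on enumerations as well as classes. A sketch, assuming Microsoft extensions mode and a GUID type in scope:

enum __declspec(uuid("12345678-1234-1234-1234-123456789abc")) E {};
auto &guid = __uuidof(E);   // now resolves through the enum's UuidAttr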
@@ -662,7 +662,7 @@ Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
IsThrownVarInScope = true;
break;
}
-
+
if (S->getFlags() &
(Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
Scope::FunctionPrototypeScope | Scope::ObjCMethodScope |
@@ -672,17 +672,22 @@ Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
}
}
}
-
+
return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
}
-ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
+ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope) {
// Don't report an error if 'throw' is used in system headers.
if (!getLangOpts().CXXExceptions &&
!getSourceManager().isInSystemHeader(OpLoc))
Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
+ // Exceptions aren't allowed in CUDA device code.
+ if (getLangOpts().CUDA)
+ CUDADiagIfDeviceCode(OpLoc, diag::err_cuda_device_exceptions)
+ << "throw" << CurrentCUDATarget();
+
if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
@@ -858,13 +863,8 @@ bool Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc,
// We don't keep the instantiated default argument expressions around so
// we must rebuild them here.
for (unsigned I = 1, E = CD->getNumParams(); I != E; ++I) {
- // Skip any default arguments that we've already instantiated.
- if (Context.getDefaultArgExprForConstructor(CD, I))
- continue;
-
- Expr *DefaultArg =
- BuildCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)).get();
- Context.addDefaultArgExprForConstructor(CD, I, DefaultArg);
+ if (CheckCXXDefaultArgExpr(ThrowLoc, CD, CD->getParamDecl(I)))
+ return true;
}
}
}
@@ -903,10 +903,10 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
I-- && isa<LambdaScopeInfo>(FunctionScopes[I]);
CurDC = getLambdaAwareParentOfDeclContext(CurDC)) {
CurLSI = cast<LambdaScopeInfo>(FunctionScopes[I]);
-
- if (!CurLSI->isCXXThisCaptured())
+
+ if (!CurLSI->isCXXThisCaptured())
continue;
-
+
auto C = CurLSI->getCXXThisCapture();
if (C.isCopyCapture()) {
@@ -922,7 +922,7 @@ static QualType adjustCVQualifiersForCXXThisWithinLambda(
assert(CurLSI);
assert(isGenericLambdaCallOperatorSpecialization(CurLSI->CallOperator));
assert(CurDC == getLambdaAwareParentOfDeclContext(CurLSI->CallOperator));
-
+
auto IsThisCaptured =
[](CXXRecordDecl *Closure, bool &IsByCopy, bool &IsConst) {
IsConst = false;
@@ -992,10 +992,10 @@ QualType Sema::getCurrentThisType() {
return ThisTy;
}
-Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
+Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
Decl *ContextDecl,
unsigned CXXThisTypeQuals,
- bool Enabled)
+ bool Enabled)
: S(S), OldCXXThisTypeOverride(S.CXXThisTypeOverride), Enabled(false)
{
if (!Enabled || !ContextDecl)
@@ -1006,13 +1006,13 @@ Sema::CXXThisScopeRAII::CXXThisScopeRAII(Sema &S,
Record = Template->getTemplatedDecl();
else
Record = cast<CXXRecordDecl>(ContextDecl);
-
+
// We care only for CVR qualifiers here, so cut everything else.
CXXThisTypeQuals &= Qualifiers::FastMask;
S.CXXThisTypeOverride
= S.Context.getPointerType(
S.Context.getRecordType(Record).withCVRQualifiers(CXXThisTypeQuals));
-
+
this->Enabled = true;
}
@@ -1026,7 +1026,7 @@ Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
static Expr *captureThis(Sema &S, ASTContext &Context, RecordDecl *RD,
QualType ThisTy, SourceLocation Loc,
const bool ByCopy) {
-
+
QualType AdjustedThisTy = ThisTy;
// The type of the corresponding data member (not a 'this' pointer if 'by
// copy').
@@ -1039,7 +1039,7 @@ static Expr *captureThis(Sema &S, ASTContext &Context, RecordDecl *RD,
CaptureThisFieldTy.removeLocalCVRQualifiers(Qualifiers::CVRMask);
AdjustedThisTy = Context.getPointerType(CaptureThisFieldTy);
}
-
+
FieldDecl *Field = FieldDecl::Create(
Context, RD, Loc, Loc, nullptr, CaptureThisFieldTy,
Context.getTrivialTypeSourceInfo(CaptureThisFieldTy, Loc), nullptr, false,
@@ -1065,24 +1065,24 @@ static Expr *captureThis(Sema &S, ASTContext &Context, RecordDecl *RD,
return This;
}
-bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
+bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
const bool ByCopy) {
// We don't need to capture this in an unevaluated context.
if (isUnevaluatedContext() && !Explicit)
return true;
-
+
assert((!ByCopy || Explicit) && "cannot implicitly capture *this by value");
const unsigned MaxFunctionScopesIndex = FunctionScopeIndexToStopAt ?
*FunctionScopeIndexToStopAt : FunctionScopes.size() - 1;
-
+
// Check that we can capture the *enclosing object* (referred to by '*this')
- // by the capturing-entity/closure (lambda/block/etc) at
- // MaxFunctionScopesIndex-deep on the FunctionScopes stack.
+ // by the capturing-entity/closure (lambda/block/etc) at
+ // MaxFunctionScopesIndex-deep on the FunctionScopes stack.
- // Note: The *enclosing object* can only be captured by-value by a
- // closure that is a lambda, using the explicit notation:
+ // Note: The *enclosing object* can only be captured by-value by a
+ // closure that is a lambda, using the explicit notation:
// [*this] { ... }.
// Every other capture of the *enclosing object* results in its by-reference
// capture.
@@ -1091,15 +1091,15 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// stack), we can capture the *enclosing object* only if:
// - 'L' has an explicit byref or byval capture of the *enclosing object*
// - or, 'L' has an implicit capture.
- // AND
+ // AND
// -- there is no enclosing closure
- // -- or, there is some enclosing closure 'E' that has already captured the
- // *enclosing object*, and every intervening closure (if any) between 'E'
+ // -- or, there is some enclosing closure 'E' that has already captured the
+ // *enclosing object*, and every intervening closure (if any) between 'E'
// and 'L' can implicitly capture the *enclosing object*.
- // -- or, every enclosing closure can implicitly capture the
+ // -- or, every enclosing closure can implicitly capture the
// *enclosing object*
-
-
+
+
unsigned NumCapturingClosures = 0;
for (unsigned idx = MaxFunctionScopesIndex; idx != 0; idx--) {
if (CapturingScopeInfo *CSI =
@@ -1145,7 +1145,7 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// In the loop below, respect the ByCopy flag only for the closure requesting
// the capture (i.e. first iteration through the loop below). Ignore it for
- // all enclosing closure's upto NumCapturingClosures (since they must be
+ // all enclosing closure's up to NumCapturingClosures (since they must be
// implicitly capturing the *enclosing object* by reference (see loop
// above)).
assert((!ByCopy ||
@@ -1155,18 +1155,18 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
// FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated
// contexts.
QualType ThisTy = getCurrentThisType();
- for (unsigned idx = MaxFunctionScopesIndex; NumCapturingClosures;
+ for (unsigned idx = MaxFunctionScopesIndex; NumCapturingClosures;
--idx, --NumCapturingClosures) {
CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
Expr *ThisExpr = nullptr;
-
+
if (LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
// For lambda expressions, build a field and an initializing expression,
// and capture the *enclosing object* by copy only if this is the first
// iteration.
ThisExpr = captureThis(*this, Context, LSI->Lambda, ThisTy, Loc,
ByCopy && idx == MaxFunctionScopesIndex);
-
+
} else if (CapturedRegionScopeInfo *RSI
= dyn_cast<CapturedRegionScopeInfo>(FunctionScopes[idx]))
ThisExpr =
@@ -1196,7 +1196,7 @@ bool Sema::isThisOutsideMemberFunctionBody(QualType BaseType) {
// type for 'this'.
if (CXXThisTypeOverride.isNull())
return false;
-
+
// Determine whether we're looking into a class that's currently being
// defined.
CXXRecordDecl *Class = BaseType->getAsCXXRecordDecl();
@@ -1216,6 +1216,17 @@ Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation());
+ // Handle errors like: int({0})
+ if (exprs.size() == 1 && !canInitializeWithParenthesizedList(Ty) &&
+ LParenLoc.isValid() && RParenLoc.isValid())
+ if (auto IList = dyn_cast<InitListExpr>(exprs[0])) {
+ Diag(TInfo->getTypeLoc().getLocStart(), diag::err_list_init_in_parens)
+ << Ty << IList->getSourceRange()
+ << FixItHint::CreateRemoval(LParenLoc)
+ << FixItHint::CreateRemoval(RParenLoc);
+ LParenLoc = RParenLoc = SourceLocation();
+ }
+
auto Result = BuildCXXTypeConstructExpr(TInfo, LParenLoc, exprs, RParenLoc);
// Avoid creating a non-type-dependent expression that contains typos.
// Non-type-dependent expressions are liable to be discarded without
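The new check above rejects a braced list wrapped in parentheses where a single initializer is expected, and the fix-its drop the parentheses. Sketch:

int x = int({0});   // error: list-initializer is not allowed in parentheses;
                    // the fix-it suggests int{0}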
@@ -1280,10 +1291,6 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
diag::err_invalid_incomplete_type_use, FullRange))
return ExprError();
- if (RequireNonAbstractType(TyBeginLoc, Ty,
- diag::err_allocation_of_abstract_type))
- return ExprError();
-
InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo);
InitializationKind Kind =
Exprs.size() ? ListInitialization
@@ -1317,8 +1324,133 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
return Result;
}
-/// doesUsualArrayDeleteWantSize - Answers whether the usual
-/// operator delete[] for the given type has a size_t parameter.
+/// \brief Determine whether the given function is a non-placement
+/// deallocation function.
+static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return false;
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
+ return Method->isUsualDeallocationFunction();
+
+ if (FD->getOverloadedOperator() != OO_Delete &&
+ FD->getOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ unsigned UsualParams = 1;
+
+ if (S.getLangOpts().SizedDeallocation && UsualParams < FD->getNumParams() &&
+ S.Context.hasSameUnqualifiedType(
+ FD->getParamDecl(UsualParams)->getType(),
+ S.Context.getSizeType()))
+ ++UsualParams;
+
+ if (S.getLangOpts().AlignedAllocation && UsualParams < FD->getNumParams() &&
+ S.Context.hasSameUnqualifiedType(
+ FD->getParamDecl(UsualParams)->getType(),
+ S.Context.getTypeDeclType(S.getStdAlignValT())))
+ ++UsualParams;
+
+ return UsualParams == FD->getNumParams();
+}
+
+namespace {
+ struct UsualDeallocFnInfo {
+ UsualDeallocFnInfo() : Found(), FD(nullptr) {}
+ UsualDeallocFnInfo(Sema &S, DeclAccessPair Found)
+ : Found(Found), FD(dyn_cast<FunctionDecl>(Found->getUnderlyingDecl())),
+ HasSizeT(false), HasAlignValT(false), CUDAPref(Sema::CFP_Native) {
+ // A function template declaration is never a usual deallocation function.
+ if (!FD)
+ return;
+ if (FD->getNumParams() == 3)
+ HasAlignValT = HasSizeT = true;
+ else if (FD->getNumParams() == 2) {
+ HasSizeT = FD->getParamDecl(1)->getType()->isIntegerType();
+ HasAlignValT = !HasSizeT;
+ }
+
+ // In CUDA, determine how much we'd like / dislike to call this.
+ if (S.getLangOpts().CUDA)
+ if (auto *Caller = dyn_cast<FunctionDecl>(S.CurContext))
+ CUDAPref = S.IdentifyCUDAPreference(Caller, FD);
+ }
+
+ operator bool() const { return FD; }
+
+ bool isBetterThan(const UsualDeallocFnInfo &Other, bool WantSize,
+ bool WantAlign) const {
+ // C++17 [expr.delete]p10:
+ // If the type has new-extended alignment, a function with a parameter
+ // of type std::align_val_t is preferred; otherwise a function without
+ // such a parameter is preferred
+ if (HasAlignValT != Other.HasAlignValT)
+ return HasAlignValT == WantAlign;
+
+ if (HasSizeT != Other.HasSizeT)
+ return HasSizeT == WantSize;
+
+ // Use CUDA call preference as a tiebreaker.
+ return CUDAPref > Other.CUDAPref;
+ }
+
+ DeclAccessPair Found;
+ FunctionDecl *FD;
+ bool HasSizeT, HasAlignValT;
+ Sema::CUDAFunctionPreference CUDAPref;
+ };
+}
+
+/// Determine whether a type has new-extended alignment. This may be called when
+/// the type is incomplete (for a delete-expression with an incomplete pointee
+/// type), in which case it will conservatively return false if the alignment is
+/// not known.
+static bool hasNewExtendedAlignment(Sema &S, QualType AllocType) {
+ return S.getLangOpts().AlignedAllocation &&
+ S.getASTContext().getTypeAlignIfKnown(AllocType) >
+ S.getASTContext().getTargetInfo().getNewAlign();
+}
+
+/// Select the correct "usual" deallocation function to use from a selection of
+/// deallocation functions (either global or class-scope).
+static UsualDeallocFnInfo resolveDeallocationOverload(
+ Sema &S, LookupResult &R, bool WantSize, bool WantAlign,
+ llvm::SmallVectorImpl<UsualDeallocFnInfo> *BestFns = nullptr) {
+ UsualDeallocFnInfo Best;
+
+ for (auto I = R.begin(), E = R.end(); I != E; ++I) {
+ UsualDeallocFnInfo Info(S, I.getPair());
+ if (!Info || !isNonPlacementDeallocationFunction(S, Info.FD) ||
+ Info.CUDAPref == Sema::CFP_Never)
+ continue;
+
+ if (!Best) {
+ Best = Info;
+ if (BestFns)
+ BestFns->push_back(Info);
+ continue;
+ }
+
+ if (Best.isBetterThan(Info, WantSize, WantAlign))
+ continue;
+
+ // If more than one preferred function is found, all non-preferred
+ // functions are eliminated from further consideration.
+ if (BestFns && Info.isBetterThan(Best, WantSize, WantAlign))
+ BestFns->clear();
+
+ Best = Info;
+ if (BestFns)
+ BestFns->push_back(Info);
+ }
+
+ return Best;
+}
+
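A sketch of what isBetterThan() prefers for class-scope deallocation, assuming C++17 aligned allocation is enabled: the std::align_val_t form wins exactly when the type is over-aligned, and the sized form exactly when a size is wanted:

#include <new>

struct alignas(2 * __STDCPP_DEFAULT_NEW_ALIGNMENT__) S {
  void operator delete(void *p);                          // usual, unaligned
  void operator delete(void *p, std::align_val_t align);  // preferred for S
};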
+/// Determine whether a given type is a class for which 'delete[]' would call
+/// a member 'operator delete[]' with a 'size_t' parameter. This implies that
+/// we need to store the array size (even if the type is
+/// trivially-destructible).
static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
QualType allocType) {
const RecordType *record =
@@ -1342,35 +1474,13 @@ static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
// on this thing, so it doesn't matter if we allocate extra space or not.
if (ops.isAmbiguous()) return false;
- LookupResult::Filter filter = ops.makeFilter();
- while (filter.hasNext()) {
- NamedDecl *del = filter.next()->getUnderlyingDecl();
-
- // C++0x [basic.stc.dynamic.deallocation]p2:
- // A template instance is never a usual deallocation function,
- // regardless of its signature.
- if (isa<FunctionTemplateDecl>(del)) {
- filter.erase();
- continue;
- }
-
- // C++0x [basic.stc.dynamic.deallocation]p2:
- // If class T does not declare [an operator delete[] with one
- // parameter] but does declare a member deallocation function
- // named operator delete[] with exactly two parameters, the
- // second of which has type std::size_t, then this function
- // is a usual deallocation function.
- if (!cast<CXXMethodDecl>(del)->isUsualDeallocationFunction()) {
- filter.erase();
- continue;
- }
- }
- filter.done();
-
- if (!ops.isSingleResult()) return false;
-
- const FunctionDecl *del = cast<FunctionDecl>(ops.getFoundDecl());
- return (del->getNumParams() == 2);
+ // C++17 [expr.delete]p10:
+ // If the deallocation functions have class scope, the one without a
+ // parameter of type std::size_t is selected.
+ auto Best = resolveDeallocationOverload(
+ S, ops, /*WantSize*/false,
+ /*WantAlign*/hasNewExtendedAlignment(S, allocType));
+ return Best && Best.HasSizeT;
}
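Concretely, a class-scope sized operator delete[] is what makes the function above answer true, forcing an array cookie even for trivially destructible elements:

#include <cstddef>

struct T {
  void operator delete[](void *p, std::size_t n);  // usual, sized
};
T *t = new T[8];   // stores the count so it can be passed back to delete[]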
/// \brief Parsed a C++ 'new' expression (C++ 5.3.4).
@@ -1454,8 +1564,20 @@ Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
return ExprError();
SourceRange DirectInitRange;
- if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
+ if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) {
DirectInitRange = List->getSourceRange();
+ // Handle errors like: new int a({0})
+ if (List->getNumExprs() == 1 &&
+ !canInitializeWithParenthesizedList(AllocType))
+ if (auto IList = dyn_cast<InitListExpr>(List->getExpr(0))) {
+ Diag(TInfo->getTypeLoc().getLocStart(), diag::err_list_init_in_parens)
+ << AllocType << List->getSourceRange()
+ << FixItHint::CreateRemoval(List->getLocStart())
+ << FixItHint::CreateRemoval(List->getLocEnd());
+ DirectInitRange = SourceRange();
+ Initializer = IList;
+ }
+ }
return BuildCXXNew(SourceRange(StartLoc, D.getLocEnd()), UseGlobal,
PlacementLParen,
@@ -1574,7 +1696,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
<< /*at end of FE*/0 << Inits[0]->getSourceRange();
}
- // In ARC, infer 'retaining' for the allocated
+ // In ARC, infer 'retaining' for the allocated
if (getLangOpts().ObjCAutoRefCount &&
AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
AllocType->isObjCLifetimeType()) {
@@ -1583,7 +1705,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
}
QualType ResultType = Context.getPointerType(AllocType);
-
+
if (ArraySize && ArraySize->getType()->isNonOverloadPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(ArraySize);
if (result.isInvalid()) return ExprError();
@@ -1596,6 +1718,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// conversion function to integral or unscoped enumeration type exists.
// C++1y [expr.new]p6: The expression [...] is implicitly converted to
// std::size_t.
+ llvm::Optional<uint64_t> KnownArraySize;
if (ArraySize && !ArraySize->isTypeDependent()) {
ExprResult ConvertedSize;
if (getLangOpts().CPlusPlus14) {
@@ -1604,7 +1727,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
ConvertedSize = PerformImplicitConversion(ArraySize, Context.getSizeType(),
AA_Converting);
- if (!ConvertedSize.isInvalid() &&
+ if (!ConvertedSize.isInvalid() &&
ArraySize->getType()->getAs<RecordType>())
// Diagnose the compatibility of this conversion.
Diag(StartLoc, diag::warn_cxx98_compat_array_size_conversion)
@@ -1613,7 +1736,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
class SizeConvertDiagnoser : public ICEConvertDiagnoser {
protected:
Expr *ArraySize;
-
+
public:
SizeConvertDiagnoser(Expr *ArraySize)
: ICEConvertDiagnoser(/*AllowScopedEnumerations*/false, false, false),
@@ -1680,44 +1803,34 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// The expression in a direct-new-declarator shall have integral type
// with a non-negative value.
//
- // Let's see if this is a constant < 0. If so, we reject it out of
- // hand. Otherwise, if it's not a constant, we must have an unparenthesized
- // array type.
- //
- // Note: such a construct has well-defined semantics in C++11: it throws
- // std::bad_array_new_length.
+ // Let's see if this is a constant < 0. If so, we reject it out of hand,
+ // per CWG1464. Otherwise, if it's not a constant, we must have an
+ // unparenthesized array type.
if (!ArraySize->isValueDependent()) {
llvm::APSInt Value;
// We've already performed any required implicit conversion to integer or
// unscoped enumeration type.
+ // FIXME: Per CWG1464, we are required to check the value prior to
+ // converting to size_t. This will never find a negative array size in
+ // C++14 onwards, because Value is always unsigned here!
if (ArraySize->isIntegerConstantExpr(Value, Context)) {
- if (Value < llvm::APSInt(
- llvm::APInt::getNullValue(Value.getBitWidth()),
- Value.isUnsigned())) {
- if (getLangOpts().CPlusPlus11)
- Diag(ArraySize->getLocStart(),
- diag::warn_typecheck_negative_array_new_size)
- << ArraySize->getSourceRange();
- else
- return ExprError(Diag(ArraySize->getLocStart(),
- diag::err_typecheck_negative_array_size)
- << ArraySize->getSourceRange());
- } else if (!AllocType->isDependentType()) {
+ if (Value.isSigned() && Value.isNegative()) {
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange());
+ }
+
+ if (!AllocType->isDependentType()) {
unsigned ActiveSizeBits =
ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
- if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
- if (getLangOpts().CPlusPlus11)
- Diag(ArraySize->getLocStart(),
- diag::warn_array_new_too_large)
- << Value.toString(10)
- << ArraySize->getSourceRange();
- else
- return ExprError(Diag(ArraySize->getLocStart(),
- diag::err_array_too_large)
- << Value.toString(10)
- << ArraySize->getSourceRange());
- }
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_array_too_large)
+ << Value.toString(10)
+ << ArraySize->getSourceRange());
}
+
+ KnownArraySize = Value.getZExtValue();
} else if (TypeIdParens.isValid()) {
// Can't have dynamic array size when the type-id is in parentheses.
Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst)
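Per the CWG1464 change above, a constant negative array bound is now rejected outright instead of being deferred to a runtime std::bad_array_new_length:

int *p = new int[-1];   // error: array size is negative (was a C++11 warning)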
@@ -1735,21 +1848,26 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
FunctionDecl *OperatorNew = nullptr;
FunctionDecl *OperatorDelete = nullptr;
+ unsigned Alignment =
+ AllocType->isDependentType() ? 0 : Context.getTypeAlign(AllocType);
+ unsigned NewAlignment = Context.getTargetInfo().getNewAlign();
+ bool PassAlignment = getLangOpts().AlignedAllocation &&
+ Alignment > NewAlignment;
if (!AllocType->isDependentType() &&
!Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
FindAllocationFunctions(StartLoc,
SourceRange(PlacementLParen, PlacementRParen),
- UseGlobal, AllocType, ArraySize, PlacementArgs,
- OperatorNew, OperatorDelete))
+ UseGlobal, AllocType, ArraySize, PassAlignment,
+ PlacementArgs, OperatorNew, OperatorDelete))
return ExprError();
// If this is an array allocation, compute whether the usual array
// deallocation function for the type has a size_t parameter.
bool UsualArrayDeleteWantsSize = false;
if (ArraySize && !AllocType->isDependentType())
- UsualArrayDeleteWantsSize
- = doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
+ UsualArrayDeleteWantsSize =
+ doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
SmallVector<Expr *, 8> AllPlaceArgs;
if (OperatorNew) {
@@ -1760,9 +1878,11 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// We've already converted the placement args, just fill in any default
// arguments. Skip the first parameter because we don't have a corresponding
- // argument.
- if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto, 1,
- PlacementArgs, AllPlaceArgs, CallType))
+ // argument. Skip the second parameter too if we're passing in the
+ // alignment; we've already filled it in.
+ if (GatherArgumentsForCall(PlacementLParen, OperatorNew, Proto,
+ PassAlignment ? 2 : 1, PlacementArgs,
+ AllPlaceArgs, CallType))
return ExprError();
if (!AllPlaceArgs.empty())
@@ -1772,44 +1892,29 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
DiagnoseSentinelCalls(OperatorNew, PlacementLParen, PlacementArgs);
// FIXME: Missing call to CheckFunctionCall or equivalent
- }
- // Warn if the type is over-aligned and is being allocated by global operator
- // new.
- if (PlacementArgs.empty() && OperatorNew &&
- (OperatorNew->isImplicit() ||
- (OperatorNew->getLocStart().isValid() &&
- getSourceManager().isInSystemHeader(OperatorNew->getLocStart())))) {
- if (unsigned Align = Context.getPreferredTypeAlign(AllocType.getTypePtr())){
- unsigned SuitableAlign = Context.getTargetInfo().getSuitableAlign();
- if (Align > SuitableAlign)
+ // Warn if the type is over-aligned and is being allocated by (unaligned)
+ // global operator new.
+ if (PlacementArgs.empty() && !PassAlignment &&
+ (OperatorNew->isImplicit() ||
+ (OperatorNew->getLocStart().isValid() &&
+ getSourceManager().isInSystemHeader(OperatorNew->getLocStart())))) {
+ if (Alignment > NewAlignment)
Diag(StartLoc, diag::warn_overaligned_type)
<< AllocType
- << unsigned(Align / Context.getCharWidth())
- << unsigned(SuitableAlign / Context.getCharWidth());
+ << unsigned(Alignment / Context.getCharWidth())
+ << unsigned(NewAlignment / Context.getCharWidth());
}
}
- QualType InitType = AllocType;
// Array 'new' can't have any initializers except empty parentheses.
// Initializer lists are also allowed, in C++11. Rely on the parser for the
// dialect distinction.
- if (ResultType->isArrayType() || ArraySize) {
- if (!isLegalArrayNewInitializer(initStyle, Initializer)) {
- SourceRange InitRange(Inits[0]->getLocStart(),
- Inits[NumInits - 1]->getLocEnd());
- Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
- return ExprError();
- }
- if (InitListExpr *ILE = dyn_cast_or_null<InitListExpr>(Initializer)) {
- // We do the initialization typechecking against the array type
- // corresponding to the number of initializers + 1 (to also check
- // default-initialization).
- unsigned NumElements = ILE->getNumInits() + 1;
- InitType = Context.getConstantArrayType(AllocType,
- llvm::APInt(Context.getTypeSize(Context.getSizeType()), NumElements),
- ArrayType::Normal, 0);
- }
+ if (ArraySize && !isLegalArrayNewInitializer(initStyle, Initializer)) {
+ SourceRange InitRange(Inits[0]->getLocStart(),
+ Inits[NumInits - 1]->getLocEnd());
+ Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
+ return ExprError();
}
// If we can perform the initialization, and we've not already done so,
@@ -1817,6 +1922,19 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (!AllocType->isDependentType() &&
!Expr::hasAnyTypeDependentArguments(
llvm::makeArrayRef(Inits, NumInits))) {
+ // The type we initialize is the complete type, including the array bound.
+ QualType InitType;
+ if (KnownArraySize)
+ InitType = Context.getConstantArrayType(
+ AllocType, llvm::APInt(Context.getTypeSize(Context.getSizeType()),
+ *KnownArraySize),
+ ArrayType::Normal, 0);
+ else if (ArraySize)
+ InitType =
+ Context.getIncompleteArrayType(AllocType, ArrayType::Normal, 0);
+ else
+ InitType = AllocType;
+
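With the array bound folded into InitType, a braced initializer with fewer elements than allocated is checked against the complete array type, so the remaining elements are value-initialized consistently:

int *p = new int[3]{1, 2};   // checked as int[3]; p[2] is value-initialized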
// C++11 [expr.new]p15:
// A new-expression that creates an object of type T initializes that
// object as follows:
@@ -1836,7 +1954,8 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, InitType);
- InitializationSequence InitSeq(*this, Entity, Kind, MultiExprArg(Inits, NumInits));
+ InitializationSequence InitSeq(*this, Entity, Kind,
+ MultiExprArg(Inits, NumInits));
ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
MultiExprArg(Inits, NumInits));
if (FullInit.isInvalid())
@@ -1844,6 +1963,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// FullInit is our initializer; strip off CXXBindTemporaryExprs, because
// we don't want the initialized object to be destructed.
+ // FIXME: We should not create these in the first place.
if (CXXBindTemporaryExpr *Binder =
dyn_cast_or_null<CXXBindTemporaryExpr>(FullInit.get()))
FullInit = Binder->getSubExpr();
@@ -1872,7 +1992,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (CXXDestructorDecl *dtor = LookupDestructor(
cast<CXXRecordDecl>(BaseRecordType->getDecl()))) {
MarkFunctionReferenced(StartLoc, dtor);
- CheckDestructorAccess(StartLoc, dtor,
+ CheckDestructorAccess(StartLoc, dtor,
PDiag(diag::err_access_dtor)
<< BaseAllocType);
if (DiagnoseUseOfDecl(dtor, StartLoc))
@@ -1882,7 +2002,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
}
return new (Context)
- CXXNewExpr(Context, UseGlobal, OperatorNew, OperatorDelete,
+ CXXNewExpr(Context, UseGlobal, OperatorNew, OperatorDelete, PassAlignment,
UsualArrayDeleteWantsSize, PlacementArgs, TypeIdParens,
ArraySize, initStyle, Initializer, ResultType, AllocTypeInfo,
Range, DirectInitRange);
@@ -1921,36 +2041,132 @@ bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
<< BaseAllocType;
}
}
-
+
return false;
}
-/// \brief Determine whether the given function is a non-placement
-/// deallocation function.
-static bool isNonPlacementDeallocationFunction(Sema &S, FunctionDecl *FD) {
- if (FD->isInvalidDecl())
- return false;
+static bool
+resolveAllocationOverload(Sema &S, LookupResult &R, SourceRange Range,
+ SmallVectorImpl<Expr *> &Args, bool &PassAlignment,
+ FunctionDecl *&Operator,
+ OverloadCandidateSet *AlignedCandidates = nullptr,
+ Expr *AlignArg = nullptr) {
+ OverloadCandidateSet Candidates(R.getNameLoc(),
+ OverloadCandidateSet::CSK_Normal);
+ for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
+ Alloc != AllocEnd; ++Alloc) {
+ // Even member operator new/delete are implicitly treated as
+ // static, so don't use AddMemberCandidate.
+ NamedDecl *D = (*Alloc)->getUnderlyingDecl();
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
- return Method->isUsualDeallocationFunction();
+ if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
+ S.AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
+ /*ExplicitTemplateArgs=*/nullptr, Args,
+ Candidates,
+ /*SuppressUserConversions=*/false);
+ continue;
+ }
- if (FD->getOverloadedOperator() != OO_Delete &&
- FD->getOverloadedOperator() != OO_Array_Delete)
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+ S.AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates,
+ /*SuppressUserConversions=*/false);
+ }
+
+ // Do the resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (Candidates.BestViableFunction(S, R.getNameLoc(), Best)) {
+ case OR_Success: {
+ // Got one!
+ FunctionDecl *FnDecl = Best->Function;
+ if (S.CheckAllocationAccess(R.getNameLoc(), Range, R.getNamingClass(),
+ Best->FoundDecl) == Sema::AR_inaccessible)
+ return true;
+
+ Operator = FnDecl;
return false;
+ }
+
+ case OR_No_Viable_Function:
+ // C++17 [expr.new]p13:
+ // If no matching function is found and the allocated object type has
+ // new-extended alignment, the alignment argument is removed from the
+ // argument list, and overload resolution is performed again.
+ if (PassAlignment) {
+ PassAlignment = false;
+ AlignArg = Args[1];
+ Args.erase(Args.begin() + 1);
+ return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
+ Operator, &Candidates, AlignArg);
+ }
- if (FD->getNumParams() == 1)
+ // MSVC will fall back on trying to find a matching global operator new
+ // if operator new[] cannot be found. Also, MSVC will leak by not
+ // generating a call to operator delete or operator delete[], but we
+ // will not replicate that bug.
+ // FIXME: Find out how this interacts with the std::align_val_t fallback
+ // once MSVC implements it.
+ if (R.getLookupName().getCXXOverloadedOperator() == OO_Array_New &&
+ S.Context.getLangOpts().MSVCCompat) {
+ R.clear();
+ R.setLookupName(S.Context.DeclarationNames.getCXXOperatorName(OO_New));
+ S.LookupQualifiedName(R, S.Context.getTranslationUnitDecl());
+ // FIXME: This will give bad diagnostics pointing at the wrong functions.
+ return resolveAllocationOverload(S, R, Range, Args, PassAlignment,
+ Operator, nullptr);
+ }
+
+ S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName() << Range;
+
+ // If we have aligned candidates, only note the align_val_t candidates
+ // from AlignedCandidates and the non-align_val_t candidates from
+ // Candidates.
+ if (AlignedCandidates) {
+ auto IsAligned = [](OverloadCandidate &C) {
+ return C.Function->getNumParams() > 1 &&
+ C.Function->getParamDecl(1)->getType()->isAlignValT();
+ };
+ auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };
+
+ // This was an overaligned allocation, so list the aligned candidates
+ // first.
+ Args.insert(Args.begin() + 1, AlignArg);
+ AlignedCandidates->NoteCandidates(S, OCD_AllCandidates, Args, "",
+ R.getNameLoc(), IsAligned);
+ Args.erase(Args.begin() + 1);
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args, "", R.getNameLoc(),
+ IsUnaligned);
+ } else {
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ }
+ return true;
+
+ case OR_Ambiguous:
+ S.Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call)
+ << R.getLookupName() << Range;
+ Candidates.NoteCandidates(S, OCD_ViableCandidates, Args);
return true;
- return S.getLangOpts().SizedDeallocation && FD->getNumParams() == 2 &&
- S.Context.hasSameUnqualifiedType(FD->getParamDecl(1)->getType(),
- S.Context.getSizeType());
+ case OR_Deleted: {
+ S.Diag(R.getNameLoc(), diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted()
+ << R.getLookupName()
+ << S.getDeletedOrUnavailableSuffix(Best->Function)
+ << Range;
+ Candidates.NoteCandidates(S, OCD_AllCandidates, Args);
+ return true;
+ }
+ }
+ llvm_unreachable("Unreachable, bad result from BestViableFunction");
}
+
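+// (Editorial sketch, not part of the patch.) The C++17 retry above is
+// visible from source when aligned allocation is enabled:
+//
+//   struct alignas(2 * __STDCPP_DEFAULT_NEW_ALIGNMENT__) Overaligned {
+//     char buf[256];
+//   };
+//   Overaligned *p = new Overaligned;
+//
+// Overload resolution is first attempted as if calling
+//   operator new(sizeof(Overaligned), std::align_val_t(alignof(Overaligned)))
+// and only if no viable function is found is the alignment argument
+// removed and plain operator new(std::size_t) tried, per [expr.new]p13.
+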
/// FindAllocationFunctions - Finds the overloads of operator new and delete
/// that are appropriate for the allocation.
bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
bool UseGlobal, QualType AllocType,
- bool IsArray, MultiExprArg PlaceArgs,
+ bool IsArray, bool &PassAlignment,
+ MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete) {
// --- Choosing an allocation function ---
@@ -1962,16 +2178,29 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// 3) The first argument is always size_t. Append the arguments from the
// placement form.
- SmallVector<Expr*, 8> AllocArgs(1 + PlaceArgs.size());
- // We don't care about the actual value of this argument.
+ SmallVector<Expr*, 8> AllocArgs;
+ AllocArgs.reserve((PassAlignment ? 2 : 1) + PlaceArgs.size());
+
+ // We don't care about the actual value of these arguments.
// FIXME: Should the Sema create the expression and embed it in the syntax
// tree? Or should the consumer just recalculate the value?
+ // FIXME: Using a dummy value will interact poorly with attribute enable_if.
IntegerLiteral Size(Context, llvm::APInt::getNullValue(
Context.getTargetInfo().getPointerWidth(0)),
Context.getSizeType(),
SourceLocation());
- AllocArgs[0] = &Size;
- std::copy(PlaceArgs.begin(), PlaceArgs.end(), AllocArgs.begin() + 1);
+ AllocArgs.push_back(&Size);
+
+ QualType AlignValT = Context.VoidTy;
+ if (PassAlignment) {
+ DeclareGlobalNewDelete();
+ AlignValT = Context.getTypeDeclType(getStdAlignValT());
+ }
+ CXXScalarValueInitExpr Align(AlignValT, nullptr, SourceLocation());
+ if (PassAlignment)
+ AllocArgs.push_back(&Align);
+
+ AllocArgs.insert(AllocArgs.end(), PlaceArgs.begin(), PlaceArgs.end());
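+
+  // (Editorial sketch, not part of the patch.) For an over-aligned T and
+  // 'new (ptr) T', the list built above corresponds to the first attempt
+  //   operator new(sizeof(T), std::align_val_t(alignof(T)), ptr)
+  // i.e. the size, then the implicit alignment when PassAlignment is set,
+  // then any placement arguments, in that order.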
// C++ [expr.new]p8:
// If the allocated type is a non-array type, the allocation
@@ -1980,50 +2209,57 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// type, the allocation function's name is operator new[] and the
// deallocation function's name is operator delete[].
DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
- IsArray ? OO_Array_New : OO_New);
- DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
- IsArray ? OO_Array_Delete : OO_Delete);
+ IsArray ? OO_Array_New : OO_New);
QualType AllocElemType = Context.getBaseElementType(AllocType);
- if (AllocElemType->isRecordType() && !UseGlobal) {
- CXXRecordDecl *Record
- = cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl());
- if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, Record,
- /*AllowMissing=*/true, OperatorNew))
+ // Find the allocation function.
+ {
+ LookupResult R(*this, NewName, StartLoc, LookupOrdinaryName);
+
+ // C++1z [expr.new]p9:
+ // If the new-expression begins with a unary :: operator, the allocation
+ // function's name is looked up in the global scope. Otherwise, if the
+ // allocated type is a class type T or array thereof, the allocation
+ // function's name is looked up in the scope of T.
+ if (AllocElemType->isRecordType() && !UseGlobal)
+ LookupQualifiedName(R, AllocElemType->getAsCXXRecordDecl());
+
+ // We can see ambiguity here if the allocation function is found in
+ // multiple base classes.
+ if (R.isAmbiguous())
return true;
- }
- if (!OperatorNew) {
- // Didn't find a member overload. Look for a global one.
- DeclareGlobalNewDelete();
- DeclContext *TUDecl = Context.getTranslationUnitDecl();
- bool FallbackEnabled = IsArray && Context.getLangOpts().MSVCCompat;
- if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl,
- /*AllowMissing=*/FallbackEnabled, OperatorNew,
- /*Diagnose=*/!FallbackEnabled)) {
- if (!FallbackEnabled)
- return true;
+ // If this lookup fails to find the name, or if the allocated type is not
+ // a class type, the allocation function's name is looked up in the
+ // global scope.
+ if (R.empty())
+ LookupQualifiedName(R, Context.getTranslationUnitDecl());
+
+ assert(!R.empty() && "implicitly declared allocation functions not found");
+ assert(!R.isAmbiguous() && "global allocation functions are ambiguous");
- // MSVC will fall back on trying to find a matching global operator new
- // if operator new[] cannot be found. Also, MSVC will leak by not
- // generating a call to operator delete or operator delete[], but we
- // will not replicate that bug.
- NewName = Context.DeclarationNames.getCXXOperatorName(OO_New);
- DeleteName = Context.DeclarationNames.getCXXOperatorName(OO_Delete);
- if (FindAllocationOverload(StartLoc, Range, NewName, AllocArgs, TUDecl,
- /*AllowMissing=*/false, OperatorNew))
+ // We do our own custom access checks below.
+ R.suppressDiagnostics();
+
+ if (resolveAllocationOverload(*this, R, Range, AllocArgs, PassAlignment,
+ OperatorNew))
return true;
- }
}
- // We don't need an operator delete if we're running under
- // -fno-exceptions.
+ // We don't need an operator delete if we're running under -fno-exceptions.
if (!getLangOpts().Exceptions) {
OperatorDelete = nullptr;
return false;
}
+ // Note that the name of OperatorNew might have been changed from array to
+ // non-array by resolveAllocationOverload.
+ DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
+ OperatorNew->getDeclName().getCXXOverloadedOperator() == OO_Array_New
+ ? OO_Array_Delete
+ : OO_Delete);
+
// C++ [expr.new]p19:
//
// If the new-expression begins with a unary :: operator, the
@@ -2042,6 +2278,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
if (FoundDelete.isAmbiguous())
return true; // FIXME: clean up expressions?
+ bool FoundGlobalDelete = FoundDelete.empty();
if (FoundDelete.empty()) {
DeclareGlobalNewDelete();
LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
@@ -2056,7 +2293,16 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// we had explicit placement arguments. This matters for things like
// struct A { void *operator new(size_t, int = 0); ... };
// A *a = new A()
- bool isPlacementNew = (!PlaceArgs.empty() || OperatorNew->param_size() != 1);
+ //
+ // We don't have any definition for what a "placement allocation function"
+ // is, but we assume it's any allocation function whose
+ // parameter-declaration-clause is anything other than (size_t).
+ //
+ // FIXME: Should (size_t, std::align_val_t) also be considered non-placement?
+ // This affects whether an exception from the constructor of an overaligned
+ // type uses the sized or non-sized form of aligned operator delete.
+ bool isPlacementNew = !PlaceArgs.empty() || OperatorNew->param_size() != 1 ||
+ OperatorNew->isVariadic();
if (isPlacementNew) {
// C++ [expr.new]p20:
@@ -2069,8 +2315,6 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
// To perform this comparison, we compute the function type that
// the deallocation function should have, and use that type both
// for template argument deduction and for comparison purposes.
- //
- // FIXME: this comparison should ignore CC and the like.
QualType ExpectedFunctionType;
{
const FunctionProtoType *Proto
@@ -2082,6 +2326,7 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
ArgTypes.push_back(Proto->getParamType(I));
FunctionProtoType::ExtProtoInfo EPI;
+ // FIXME: This is not part of the standard's rule.
EPI.Variadic = Proto->isVariadic();
ExpectedFunctionType
@@ -2092,8 +2337,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
DEnd = FoundDelete.end();
D != DEnd; ++D) {
FunctionDecl *Fn = nullptr;
- if (FunctionTemplateDecl *FnTmpl
- = dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
+ if (FunctionTemplateDecl *FnTmpl =
+ dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
// Perform template argument deduction to try to match the
// expected function type.
TemplateDeductionInfo Info(StartLoc);
@@ -2103,38 +2348,35 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
} else
Fn = cast<FunctionDecl>((*D)->getUnderlyingDecl());
- if (Context.hasSameType(Fn->getType(), ExpectedFunctionType))
+ if (Context.hasSameType(adjustCCAndNoReturn(Fn->getType(),
+ ExpectedFunctionType,
+ /*AdjustExceptionSpec*/true),
+ ExpectedFunctionType))
Matches.push_back(std::make_pair(D.getPair(), Fn));
}
- } else {
- // C++ [expr.new]p20:
- // [...] Any non-placement deallocation function matches a
- // non-placement allocation function. [...]
- for (LookupResult::iterator D = FoundDelete.begin(),
- DEnd = FoundDelete.end();
- D != DEnd; ++D) {
- if (FunctionDecl *Fn = dyn_cast<FunctionDecl>((*D)->getUnderlyingDecl()))
- if (isNonPlacementDeallocationFunction(*this, Fn))
- Matches.push_back(std::make_pair(D.getPair(), Fn));
- }
+ if (getLangOpts().CUDA)
+ EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+ } else {
// C++1y [expr.new]p22:
// For a non-placement allocation function, the normal deallocation
// function lookup is used
- // C++1y [expr.delete]p?:
- // If [...] deallocation function lookup finds both a usual deallocation
- // function with only a pointer parameter and a usual deallocation
- // function with both a pointer parameter and a size parameter, then the
- // selected deallocation function shall be the one with two parameters.
- // Otherwise, the selected deallocation function shall be the function
- // with one parameter.
- if (getLangOpts().SizedDeallocation && Matches.size() == 2) {
- if (Matches[0].second->getNumParams() == 1)
- Matches.erase(Matches.begin());
- else
- Matches.erase(Matches.begin() + 1);
- assert(Matches[0].second->getNumParams() == 2 &&
- "found an unexpected usual deallocation function");
+ //
+ // Per [expr.delete]p10, this lookup prefers a member operator delete
+ // without a size_t argument, but prefers a non-member operator delete
+ // with a size_t where possible (which it always is in this case).
+ llvm::SmallVector<UsualDeallocFnInfo, 4> BestDeallocFns;
+ UsualDeallocFnInfo Selected = resolveDeallocationOverload(
+ *this, FoundDelete, /*WantSize*/ FoundGlobalDelete,
+ /*WantAlign*/ hasNewExtendedAlignment(*this, AllocElemType),
+ &BestDeallocFns);
+ if (Selected)
+ Matches.push_back(std::make_pair(Selected.Found, Selected.FD));
+ else {
+ // If we failed to select an operator, all remaining functions are viable
+ // but ambiguous.
+ for (auto Fn : BestDeallocFns)
+ Matches.push_back(std::make_pair(Fn.Found, Fn.FD));
}
}
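+
+  // (Editorial sketch, not part of the patch.) The preference implemented
+  // by resolveDeallocationOverload, in source terms:
+  //   struct S {
+  //     void operator delete(void *);              // selected here
+  //     void operator delete(void *, std::size_t); // the sized form is
+  //                                                // preferred only when
+  //                                                // found at global scope
+  //   };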
@@ -2145,130 +2387,59 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
if (Matches.size() == 1) {
OperatorDelete = Matches[0].second;
- // C++0x [expr.new]p20:
- // If the lookup finds the two-parameter form of a usual
- // deallocation function (3.7.4.2) and that function, considered
+ // C++1z [expr.new]p23:
+ // If the lookup finds a usual deallocation function (3.7.4.2)
+ // with a parameter of type std::size_t and that function, considered
// as a placement deallocation function, would have been
// selected as a match for the allocation function, the program
// is ill-formed.
- if (!PlaceArgs.empty() && getLangOpts().CPlusPlus11 &&
+ if (getLangOpts().CPlusPlus11 && isPlacementNew &&
isNonPlacementDeallocationFunction(*this, OperatorDelete)) {
- Diag(StartLoc, diag::err_placement_new_non_placement_delete)
- << SourceRange(PlaceArgs.front()->getLocStart(),
- PlaceArgs.back()->getLocEnd());
- if (!OperatorDelete->isImplicit())
- Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
- << DeleteName;
- } else {
- CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(),
- Matches[0].first);
- }
- }
-
- return false;
-}
-
-/// \brief Find a fitting overload
-/// in the specified scope.
-///
-/// \param StartLoc The location of the 'new' token.
-/// \param Range The range of the placement arguments.
-/// \param Name The name of the function ('operator new' or 'operator new[]').
-/// \param Args The placement arguments specified.
-/// \param Ctx The scope in which we should search; either a class scope or the
-/// translation unit.
-/// \param AllowMissing If \c true, succeed without reporting an error when
-/// no matching allocation function is found, leaving \p Operator
-/// unchanged. If \c false, diagnose the failure.
-/// \param Operator Filled in with the found allocation function. Unchanged if
-/// no allocation function was found.
-/// \param Diagnose If \c true, issue errors if the allocation function is not
-/// usable.
-bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
- DeclarationName Name, MultiExprArg Args,
- DeclContext *Ctx,
- bool AllowMissing, FunctionDecl *&Operator,
- bool Diagnose) {
- LookupResult R(*this, Name, StartLoc, LookupOrdinaryName);
- LookupQualifiedName(R, Ctx);
- if (R.empty()) {
- if (AllowMissing || !Diagnose)
- return false;
- return Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
- << Name << Range;
- }
-
- if (R.isAmbiguous())
- return true;
-
- R.suppressDiagnostics();
-
- OverloadCandidateSet Candidates(StartLoc, OverloadCandidateSet::CSK_Normal);
- for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
- Alloc != AllocEnd; ++Alloc) {
- // Even member operator new/delete are implicitly treated as
- // static, so don't use AddMemberCandidate.
- NamedDecl *D = (*Alloc)->getUnderlyingDecl();
+ UsualDeallocFnInfo Info(*this,
+ DeclAccessPair::make(OperatorDelete, AS_public));
+ // Core issue, per mail to core reflector, 2016-10-09:
+ // If this is a member operator delete, and there is a corresponding
+ // non-sized member operator delete, this isn't /really/ a sized
+ // deallocation function, it just happens to have a size_t parameter.
+ bool IsSizedDelete = Info.HasSizeT;
+ if (IsSizedDelete && !FoundGlobalDelete) {
+ auto NonSizedDelete =
+ resolveDeallocationOverload(*this, FoundDelete, /*WantSize*/false,
+ /*WantAlign*/Info.HasAlignValT);
+ if (NonSizedDelete && !NonSizedDelete.HasSizeT &&
+ NonSizedDelete.HasAlignValT == Info.HasAlignValT)
+ IsSizedDelete = false;
+ }
- if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
- AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
- /*ExplicitTemplateArgs=*/nullptr,
- Args, Candidates,
- /*SuppressUserConversions=*/false);
- continue;
+ if (IsSizedDelete) {
+ SourceRange R = PlaceArgs.empty()
+ ? SourceRange()
+ : SourceRange(PlaceArgs.front()->getLocStart(),
+ PlaceArgs.back()->getLocEnd());
+ Diag(StartLoc, diag::err_placement_new_non_placement_delete) << R;
+ if (!OperatorDelete->isImplicit())
+ Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
+ << DeleteName;
+ }
}
- FunctionDecl *Fn = cast<FunctionDecl>(D);
- AddOverloadCandidate(Fn, Alloc.getPair(), Args, Candidates,
- /*SuppressUserConversions=*/false);
- }
-
- // Do the resolution.
- OverloadCandidateSet::iterator Best;
- switch (Candidates.BestViableFunction(*this, StartLoc, Best)) {
- case OR_Success: {
- // Got one!
- FunctionDecl *FnDecl = Best->Function;
- if (CheckAllocationAccess(StartLoc, Range, R.getNamingClass(),
- Best->FoundDecl, Diagnose) == AR_inaccessible)
- return true;
+ CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(),
+ Matches[0].first);
+ } else if (!Matches.empty()) {
+ // We found multiple suitable operators. Per [expr.new]p20, that means we
+ // call no 'operator delete' function, but we should at least warn the user.
+ // FIXME: Suppress this warning if the construction cannot throw.
+ Diag(StartLoc, diag::warn_ambiguous_suitable_delete_function_found)
+ << DeleteName << AllocElemType;
- Operator = FnDecl;
- return false;
+ for (auto &Match : Matches)
+ Diag(Match.second->getLocation(),
+ diag::note_member_declared_here) << DeleteName;
}
- case OR_No_Viable_Function:
- if (Diagnose) {
- Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
- << Name << Range;
- Candidates.NoteCandidates(*this, OCD_AllCandidates, Args);
- }
- return true;
-
- case OR_Ambiguous:
- if (Diagnose) {
- Diag(StartLoc, diag::err_ovl_ambiguous_call)
- << Name << Range;
- Candidates.NoteCandidates(*this, OCD_ViableCandidates, Args);
- }
- return true;
-
- case OR_Deleted: {
- if (Diagnose) {
- Diag(StartLoc, diag::err_ovl_deleted_call)
- << Best->Function->isDeleted()
- << Name
- << getDeletedOrUnavailableSuffix(Best->Function)
- << Range;
- Candidates.NoteCandidates(*this, OCD_AllCandidates, Args);
- }
- return true;
- }
- }
- llvm_unreachable("Unreachable, bad result from BestViableFunction");
+ return false;
}
-
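+// (Editorial sketch, not part of the patch.) The [expr.new]p23 check above
+// rejects the standard's classic example, where the usual sized
+// deallocation function also matches the placement allocation function:
+//
+//   struct T {
+//     void *operator new(std::size_t, std::size_t); // placement form
+//     void operator delete(void *, std::size_t);    // usual sized delete
+//   };
+//   T *t = new (std::size_t(16)) T; // ill-formed
+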
/// DeclareGlobalNewDelete - Declare the global forms of operator new and
/// delete. These are:
/// @code
@@ -2336,41 +2507,64 @@ void Sema::DeclareGlobalNewDelete() {
nullptr);
getStdBadAlloc()->setImplicit(true);
}
+ if (!StdAlignValT && getLangOpts().AlignedAllocation) {
+ // The "std::align_val_t" enum class has not yet been declared, so build it
+ // implicitly.
+ auto *AlignValT = EnumDecl::Create(
+ Context, getOrCreateStdNamespace(), SourceLocation(), SourceLocation(),
+ &PP.getIdentifierTable().get("align_val_t"), nullptr, true, true, true);
+ AlignValT->setIntegerType(Context.getSizeType());
+ AlignValT->setPromotionType(Context.getSizeType());
+ AlignValT->setImplicit(true);
+ StdAlignValT = AlignValT;
+ }
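+
+  // (Editorial sketch, not part of the patch.) The declaration built above
+  // is equivalent to the source-level
+  //   namespace std { enum class align_val_t : size_t {}; }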
GlobalNewDeleteDeclared = true;
QualType VoidPtr = Context.getPointerType(Context.VoidTy);
QualType SizeT = Context.getSizeType();
- DeclareGlobalAllocationFunction(
- Context.DeclarationNames.getCXXOperatorName(OO_New),
- VoidPtr, SizeT, QualType());
- DeclareGlobalAllocationFunction(
- Context.DeclarationNames.getCXXOperatorName(OO_Array_New),
- VoidPtr, SizeT, QualType());
- DeclareGlobalAllocationFunction(
- Context.DeclarationNames.getCXXOperatorName(OO_Delete),
- Context.VoidTy, VoidPtr);
- DeclareGlobalAllocationFunction(
- Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete),
- Context.VoidTy, VoidPtr);
- if (getLangOpts().SizedDeallocation) {
- DeclareGlobalAllocationFunction(
- Context.DeclarationNames.getCXXOperatorName(OO_Delete),
- Context.VoidTy, VoidPtr, Context.getSizeType());
- DeclareGlobalAllocationFunction(
- Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete),
- Context.VoidTy, VoidPtr, Context.getSizeType());
- }
+ auto DeclareGlobalAllocationFunctions = [&](OverloadedOperatorKind Kind,
+ QualType Return, QualType Param) {
+ llvm::SmallVector<QualType, 3> Params;
+ Params.push_back(Param);
+
+ // Create up to four variants of the function (sized/aligned).
+ bool HasSizedVariant = getLangOpts().SizedDeallocation &&
+ (Kind == OO_Delete || Kind == OO_Array_Delete);
+ bool HasAlignedVariant = getLangOpts().AlignedAllocation;
+
+ int NumSizeVariants = (HasSizedVariant ? 2 : 1);
+ int NumAlignVariants = (HasAlignedVariant ? 2 : 1);
+ for (int Sized = 0; Sized < NumSizeVariants; ++Sized) {
+ if (Sized)
+ Params.push_back(SizeT);
+
+ for (int Aligned = 0; Aligned < NumAlignVariants; ++Aligned) {
+ if (Aligned)
+ Params.push_back(Context.getTypeDeclType(getStdAlignValT()));
+
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(Kind), Return, Params);
+
+ if (Aligned)
+ Params.pop_back();
+ }
+ }
+ };
+
+ DeclareGlobalAllocationFunctions(OO_New, VoidPtr, SizeT);
+ DeclareGlobalAllocationFunctions(OO_Array_New, VoidPtr, SizeT);
+ DeclareGlobalAllocationFunctions(OO_Delete, Context.VoidTy, VoidPtr);
+ DeclareGlobalAllocationFunctions(OO_Array_Delete, Context.VoidTy, VoidPtr);
}
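+// (Editorial sketch, not part of the patch.) With both -fsized-deallocation
+// and -faligned-allocation enabled, the loop above implicitly declares, for
+// OO_Delete:
+//
+//   void operator delete(void *) noexcept;
+//   void operator delete(void *, std::align_val_t) noexcept;
+//   void operator delete(void *, std::size_t) noexcept;
+//   void operator delete(void *, std::size_t, std::align_val_t) noexcept;
+//
+// operator new and operator new[] get only the unaligned and aligned forms,
+// since HasSizedVariant is restricted to the delete operators.
+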
/// DeclareGlobalAllocationFunction - Declares a single implicit global
/// allocation function if it doesn't already exist.
void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
QualType Return,
- QualType Param1, QualType Param2) {
+ ArrayRef<QualType> Params) {
DeclContext *GlobalCtx = Context.getTranslationUnitDecl();
- unsigned NumParams = Param2.isNull() ? 1 : 2;
// Check if this function is already declared.
DeclContext::lookup_result R = GlobalCtx->lookup(Name);
@@ -2379,18 +2573,12 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
// Only look at non-template functions, as it is the predefined,
// non-templated allocation function we are trying to declare here.
if (FunctionDecl *Func = dyn_cast<FunctionDecl>(*Alloc)) {
- if (Func->getNumParams() == NumParams) {
- QualType InitialParam1Type =
- Context.getCanonicalType(Func->getParamDecl(0)
- ->getType().getUnqualifiedType());
- QualType InitialParam2Type =
- NumParams == 2
- ? Context.getCanonicalType(Func->getParamDecl(1)
- ->getType().getUnqualifiedType())
- : QualType();
- // FIXME: Do we need to check for default arguments here?
- if (InitialParam1Type == Param1 &&
- (NumParams == 1 || InitialParam2Type == Param2)) {
+ if (Func->getNumParams() == Params.size()) {
+ llvm::SmallVector<QualType, 3> FuncParams;
+ for (auto *P : Func->parameters())
+ FuncParams.push_back(
+ Context.getCanonicalType(P->getType().getUnqualifiedType()));
+ if (llvm::makeArrayRef(FuncParams) == Params) {
// Make the function visible to name lookup, even if we found it in
// an unimported module. It either is an implicitly-declared global
// allocation function, or is suppressing that function.
@@ -2419,82 +2607,80 @@ void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
getLangOpts().CPlusPlus11 ? EST_BasicNoexcept : EST_DynamicNone;
}
- QualType Params[] = { Param1, Param2 };
-
- QualType FnType = Context.getFunctionType(
- Return, llvm::makeArrayRef(Params, NumParams), EPI);
- FunctionDecl *Alloc =
- FunctionDecl::Create(Context, GlobalCtx, SourceLocation(),
- SourceLocation(), Name,
- FnType, /*TInfo=*/nullptr, SC_None, false, true);
- Alloc->setImplicit();
-
- // Implicit sized deallocation functions always have default visibility.
- Alloc->addAttr(VisibilityAttr::CreateImplicit(Context,
- VisibilityAttr::Default));
-
- ParmVarDecl *ParamDecls[2];
- for (unsigned I = 0; I != NumParams; ++I) {
- ParamDecls[I] = ParmVarDecl::Create(Context, Alloc, SourceLocation(),
- SourceLocation(), nullptr,
- Params[I], /*TInfo=*/nullptr,
- SC_None, nullptr);
- ParamDecls[I]->setImplicit();
- }
- Alloc->setParams(llvm::makeArrayRef(ParamDecls, NumParams));
-
- Context.getTranslationUnitDecl()->addDecl(Alloc);
- IdResolver.tryAddTopLevelDecl(Alloc, Name);
+ auto CreateAllocationFunctionDecl = [&](Attr *ExtraAttr) {
+ QualType FnType = Context.getFunctionType(Return, Params, EPI);
+ FunctionDecl *Alloc = FunctionDecl::Create(
+ Context, GlobalCtx, SourceLocation(), SourceLocation(), Name,
+ FnType, /*TInfo=*/nullptr, SC_None, false, true);
+ Alloc->setImplicit();
+
+ // Implicit sized deallocation functions always have default visibility.
+ Alloc->addAttr(
+ VisibilityAttr::CreateImplicit(Context, VisibilityAttr::Default));
+
+ llvm::SmallVector<ParmVarDecl *, 3> ParamDecls;
+ for (QualType T : Params) {
+ ParamDecls.push_back(ParmVarDecl::Create(
+ Context, Alloc, SourceLocation(), SourceLocation(), nullptr, T,
+ /*TInfo=*/nullptr, SC_None, nullptr));
+ ParamDecls.back()->setImplicit();
+ }
+ Alloc->setParams(ParamDecls);
+ if (ExtraAttr)
+ Alloc->addAttr(ExtraAttr);
+ Context.getTranslationUnitDecl()->addDecl(Alloc);
+ IdResolver.tryAddTopLevelDecl(Alloc, Name);
+ };
+
+ if (!LangOpts.CUDA)
+ CreateAllocationFunctionDecl(nullptr);
+ else {
+ // Host and device get their own declaration so each can be
+ // defined or re-declared independently.
+ CreateAllocationFunctionDecl(CUDAHostAttr::CreateImplicit(Context));
+ CreateAllocationFunctionDecl(CUDADeviceAttr::CreateImplicit(Context));
+ }
}
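+// (Editorial sketch, not part of the patch.) In CUDA mode the effect is as
+// if each implicit allocation function were declared twice:
+//
+//   __host__   void *operator new(std::size_t);
+//   __device__ void *operator new(std::size_t);
+//
+// so a translation unit can define or re-declare either variant on its own.
+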
FunctionDecl *Sema::FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
+ bool Overaligned,
DeclarationName Name) {
DeclareGlobalNewDelete();
LookupResult FoundDelete(*this, Name, StartLoc, LookupOrdinaryName);
LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
- // C++ [expr.new]p20:
- // [...] Any non-placement deallocation function matches a
- // non-placement allocation function. [...]
- llvm::SmallVector<FunctionDecl*, 2> Matches;
- for (LookupResult::iterator D = FoundDelete.begin(),
- DEnd = FoundDelete.end();
- D != DEnd; ++D) {
- if (FunctionDecl *Fn = dyn_cast<FunctionDecl>(*D))
- if (isNonPlacementDeallocationFunction(*this, Fn))
- Matches.push_back(Fn);
- }
-
- // C++1y [expr.delete]p?:
- // If the type is complete and deallocation function lookup finds both a
- // usual deallocation function with only a pointer parameter and a usual
- // deallocation function with both a pointer parameter and a size
- // parameter, then the selected deallocation function shall be the one
- // with two parameters. Otherwise, the selected deallocation function
- // shall be the function with one parameter.
- if (getLangOpts().SizedDeallocation && Matches.size() == 2) {
- unsigned NumArgs = CanProvideSize ? 2 : 1;
- if (Matches[0]->getNumParams() != NumArgs)
- Matches.erase(Matches.begin());
- else
- Matches.erase(Matches.begin() + 1);
- assert(Matches[0]->getNumParams() == NumArgs &&
- "found an unexpected usual deallocation function");
- }
+ // FIXME: It's possible for this to result in ambiguity, through a
+ // user-declared variadic operator delete or the enable_if attribute. We
+ // should probably not consider those cases to be usual deallocation
+ // functions. But for now we just make an arbitrary choice in that case.
+ auto Result = resolveDeallocationOverload(*this, FoundDelete, CanProvideSize,
+ Overaligned);
+ assert(Result.FD && "operator delete missing from global scope?");
+ return Result.FD;
+}
- if (getLangOpts().CUDA)
- EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+FunctionDecl *Sema::FindDeallocationFunctionForDestructor(SourceLocation Loc,
+ CXXRecordDecl *RD) {
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Delete);
- assert(Matches.size() == 1 &&
- "unexpectedly have multiple usual deallocation functions");
- return Matches.front();
+ FunctionDecl *OperatorDelete = nullptr;
+ if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
+ return nullptr;
+ if (OperatorDelete)
+ return OperatorDelete;
+
+ // If there's no class-specific operator delete, look up the global
+ // non-array delete.
+ return FindUsualDeallocationFunction(
+ Loc, true, hasNewExtendedAlignment(*this, Context.getRecordType(RD)),
+ Name);
}
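+// (Editorial sketch, not part of the patch.) This entry point covers cases
+// where the destructor itself needs a deallocation function, such as the
+// deleting destructor implied by
+//
+//   struct B { virtual ~B() {} };
+//
+// which must find a usable non-array operator delete even though no
+// delete-expression appears in the source.
+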
bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name,
- FunctionDecl* &Operator, bool Diagnose) {
+ FunctionDecl *&Operator, bool Diagnose) {
LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
// Try to find operator delete/operator delete[] in class scope.
LookupQualifiedName(Found, RD);
@@ -2504,27 +2690,20 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
Found.suppressDiagnostics();
- SmallVector<DeclAccessPair,4> Matches;
- for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
- F != FEnd; ++F) {
- NamedDecl *ND = (*F)->getUnderlyingDecl();
+ bool Overaligned = hasNewExtendedAlignment(*this, Context.getRecordType(RD));
- // Ignore template operator delete members from the check for a usual
- // deallocation function.
- if (isa<FunctionTemplateDecl>(ND))
- continue;
+ // C++17 [expr.delete]p10:
+ // If the deallocation functions have class scope, the one without a
+ // parameter of type std::size_t is selected.
+ llvm::SmallVector<UsualDeallocFnInfo, 4> Matches;
+ resolveDeallocationOverload(*this, Found, /*WantSize*/ false,
+ /*WantAlign*/ Overaligned, &Matches);
- if (cast<CXXMethodDecl>(ND)->isUsualDeallocationFunction())
- Matches.push_back(F.getPair());
- }
-
- if (getLangOpts().CUDA)
- EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
-
- // There's exactly one suitable operator; pick it.
+ // If we could find an overload, use it.
if (Matches.size() == 1) {
- Operator = cast<CXXMethodDecl>(Matches[0]->getUnderlyingDecl());
+ Operator = cast<CXXMethodDecl>(Matches[0].FD);
+ // FIXME: DiagnoseUseOfDecl?
if (Operator->isDeleted()) {
if (Diagnose) {
Diag(StartLoc, diag::err_deleted_function_use);
@@ -2534,21 +2713,21 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
}
if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
- Matches[0], Diagnose) == AR_inaccessible)
+ Matches[0].Found, Diagnose) == AR_inaccessible)
return true;
return false;
+ }
- // We found multiple suitable operators; complain about the ambiguity.
- } else if (!Matches.empty()) {
+ // We found multiple suitable operators; complain about the ambiguity.
+ // FIXME: The standard doesn't say to do this; it appears that the intent
+ // is that this should never happen.
+ if (!Matches.empty()) {
if (Diagnose) {
Diag(StartLoc, diag::err_ambiguous_suitable_delete_member_function_found)
<< Name << RD;
-
- for (SmallVectorImpl<DeclAccessPair>::iterator
- F = Matches.begin(), FEnd = Matches.end(); F != FEnd; ++F)
- Diag((*F)->getUnderlyingDecl()->getLocation(),
- diag::note_member_declared_here) << Name;
+ for (auto &Match : Matches)
+ Diag(Match.FD->getLocation(), diag::note_member_declared_here) << Name;
}
return true;
}
@@ -2560,9 +2739,8 @@ bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
Diag(StartLoc, diag::err_no_suitable_delete_member_function_found)
<< Name << RD;
- for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
- F != FEnd; ++F)
- Diag((*F)->getUnderlyingDecl()->getLocation(),
+ for (NamedDecl *D : Found)
+ Diag(D->getUnderlyingDecl()->getLocation(),
diag::note_member_declared_here) << Name;
}
return true;
@@ -2593,7 +2771,7 @@ public:
/// translation unit. False, if this is the initial analysis at the point
/// delete-expression was encountered.
explicit MismatchingNewDeleteDetector(bool EndOfTU)
- : IsArrayForm(false), Field(nullptr), EndOfTU(EndOfTU),
+ : Field(nullptr), IsArrayForm(false), EndOfTU(EndOfTU),
HasUndefinedConstructors(false) {}
/// \brief Checks whether pointee of a delete-expression is initialized with
@@ -2612,11 +2790,11 @@ public:
/// \param DeleteWasArrayForm Array form-ness of the delete-expression used
/// for deleting the \p Field.
MismatchResult analyzeField(FieldDecl *Field, bool DeleteWasArrayForm);
+ FieldDecl *Field;
/// List of mismatching new-expressions used for initialization of the pointee
llvm::SmallVector<const CXXNewExpr *, 4> NewExprs;
/// Indicates whether delete-expression was in array form.
bool IsArrayForm;
- FieldDecl *Field;
private:
const bool EndOfTU;
@@ -2921,7 +3099,7 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
QualType PointeeElem = Context.getBaseElementType(Pointee);
if (unsigned AddressSpace = Pointee.getAddressSpace())
- return Diag(Ex.get()->getLocStart(),
+ return Diag(Ex.get()->getLocStart(),
diag::err_address_space_qualified_delete)
<< Pointee.getUnqualifiedType() << AddressSpace;
@@ -2973,7 +3151,10 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
// Otherwise, the usual operator delete[] should be the
// function we just found.
else if (OperatorDelete && isa<CXXMethodDecl>(OperatorDelete))
- UsualArrayDeleteWantsSize = (OperatorDelete->getNumParams() == 2);
+ UsualArrayDeleteWantsSize =
+ UsualDeallocFnInfo(*this,
+ DeclAccessPair::make(OperatorDelete, AS_public))
+ .HasSizeT;
}
if (!PointeeRD->hasIrrelevantDestructor())
@@ -2990,20 +3171,24 @@ Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
SourceLocation());
}
- if (!OperatorDelete)
+ if (!OperatorDelete) {
+ bool IsComplete = isCompleteType(StartLoc, Pointee);
+ bool CanProvideSize =
+ IsComplete && (!ArrayForm || UsualArrayDeleteWantsSize ||
+ Pointee.isDestructedType());
+ bool Overaligned = hasNewExtendedAlignment(*this, Pointee);
+
// Look for a global declaration.
- OperatorDelete = FindUsualDeallocationFunction(
- StartLoc, isCompleteType(StartLoc, Pointee) &&
- (!ArrayForm || UsualArrayDeleteWantsSize ||
- Pointee.isDestructedType()),
- DeleteName);
+ OperatorDelete = FindUsualDeallocationFunction(StartLoc, CanProvideSize,
+ Overaligned, DeleteName);
+ }
MarkFunctionReferenced(StartLoc, OperatorDelete);
// Check access and ambiguity of operator delete and destructor.
if (PointeeRD) {
if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
- CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor,
+ CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor,
PDiag(diag::err_access_dtor) << PointeeElem);
}
}
@@ -3234,7 +3419,7 @@ static ExprResult BuildCXXCastArgument(Sema &S,
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence &ICS,
- AssignmentAction Action,
+ AssignmentAction Action,
CheckedConversionKind CCK) {
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion: {
@@ -3309,6 +3494,10 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
llvm_unreachable("Cannot perform an ellipsis conversion");
case ImplicitConversionSequence::BadConversion:
+ bool Diagnosed =
+ DiagnoseAssignmentResult(Incompatible, From->getExprLoc(), ToType,
+ From->getType(), From, Action);
+ assert(Diagnosed && "failed to diagnose bad conversion"); (void)Diagnosed;
return ExprError();
}
@@ -3324,16 +3513,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
ExprResult
Sema::PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
- AssignmentAction Action,
+ AssignmentAction Action,
CheckedConversionKind CCK) {
bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast);
-
+
// Overall FIXME: we are recomputing too many types here and doing far too
// much extra work. What this means is that we need to keep track of more
// information that is computed when we try the implicit conversion initially,
// so that we don't need to recompute anything here.
QualType FromType = From->getType();
-
+
if (SCS.CopyConstructor) {
// FIXME: When can ToType be a reference type?
assert(!ToType->isReferenceType());
@@ -3403,13 +3592,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Array_To_Pointer:
FromType = Context.getArrayDecayedType(FromType);
- From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay,
+ From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
case ICK_Function_To_Pointer:
FromType = Context.getPointerType(FromType);
- From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
+ From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
@@ -3446,16 +3635,6 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Nothing else to do.
break;
- case ICK_NoReturn_Adjustment:
- // If both sides are functions (or pointers/references to them), there could
- // be incompatible exception declarations.
- if (CheckExceptionSpecCompatibility(From, ToType))
- return ExprError();
-
- From = ImpCastExprToType(From, ToType, CK_NoOp,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
- break;
-
case ICK_Integral_Promotion:
case ICK_Integral_Conversion:
if (ToType->isBooleanType()) {
@@ -3472,7 +3651,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Floating_Promotion:
case ICK_Floating_Conversion:
- From = ImpCastExprToType(From, ToType, CK_FloatingCast,
+ From = ImpCastExprToType(From, ToType, CK_FloatingCast,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
@@ -3491,22 +3670,22 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
} else {
CK = CK_IntegralComplexCast;
}
- From = ImpCastExprToType(From, ToType, CK,
+ From = ImpCastExprToType(From, ToType, CK,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
}
case ICK_Floating_Integral:
if (ToType->isRealFloatingType())
- From = ImpCastExprToType(From, ToType, CK_IntegralToFloating,
+ From = ImpCastExprToType(From, ToType, CK_IntegralToFloating,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
else
- From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral,
+ From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
case ICK_Compatible_Conversion:
- From = ImpCastExprToType(From, ToType, CK_NoOp,
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
@@ -3528,20 +3707,20 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (From->getType()->isObjCObjectPointerType() &&
ToType->isObjCObjectPointerType())
EmitRelatedResultTypeNote(From);
- }
+ }
else if (getLangOpts().ObjCAutoRefCount &&
- !CheckObjCARCUnavailableWeakConversion(ToType,
+ !CheckObjCARCUnavailableWeakConversion(ToType,
From->getType())) {
if (Action == AA_Initializing)
- Diag(From->getLocStart(),
+ Diag(From->getLocStart(),
diag::err_arc_weak_unavailable_assign);
else
Diag(From->getLocStart(),
- diag::err_arc_convesion_of_weak_unavailable)
- << (Action == AA_Casting) << From->getType() << ToType
+ diag::err_arc_convesion_of_weak_unavailable)
+ << (Action == AA_Casting) << From->getType() << ToType
<< From->getSourceRange();
}
-
+
CastKind Kind = CK_Invalid;
CXXCastPath BasePath;
if (CheckPointerConversion(From, ToType, Kind, BasePath, CStyle))
@@ -3589,7 +3768,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
From = ImpCastExprToType(From, Context.BoolTy,
- ScalarTypeToBooleanCastKind(FromType),
+ ScalarTypeToBooleanCastKind(FromType),
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
@@ -3610,7 +3789,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
case ICK_Vector_Conversion:
- From = ImpCastExprToType(From, ToType, CK_BitCast,
+ From = ImpCastExprToType(From, ToType, CK_BitCast,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
@@ -3655,7 +3834,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// _Complex x -> x
From = ImpCastExprToType(From, ElType,
isFloatingComplex ? CK_FloatingComplexToReal
- : CK_IntegralComplexToReal,
+ : CK_IntegralComplexToReal,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
// x -> y
@@ -3663,23 +3842,23 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// do nothing
} else if (ToType->isRealFloatingType()) {
From = ImpCastExprToType(From, ToType,
- isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating,
+ isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
} else {
assert(ToType->isIntegerType());
From = ImpCastExprToType(From, ToType,
- isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast,
+ isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
}
}
break;
-
+
case ICK_Block_Pointer_Conversion: {
From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast,
VK_RValue, /*BasePath=*/nullptr, CCK).get();
break;
}
-
+
case ICK_TransparentUnionConversion: {
ExprResult FromRes = From;
Sema::AssignConvertType ConvTy =
@@ -3699,12 +3878,20 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From->getValueKind()).get();
break;
+ case ICK_Zero_Queue_Conversion:
+ From = ImpCastExprToType(From, ToType,
+ CK_ZeroToOCLQueue,
+ From->getValueKind()).get();
+ break;
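+
+  // (Editorial sketch, not part of the patch.) In OpenCL 2.0 this converts
+  // a zero constant to the queue type, e.g.:
+  //   queue_t q = 0;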
+
case ICK_Lvalue_To_Rvalue:
case ICK_Array_To_Pointer:
case ICK_Function_To_Pointer:
+ case ICK_Function_Conversion:
case ICK_Qualification:
case ICK_Num_Conversion_Kinds:
case ICK_C_Only_Conversion:
+ case ICK_Incompatible_Pointer_Conversion:
llvm_unreachable("Improper second standard conversion");
}
@@ -3713,6 +3900,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// Nothing to do.
break;
+ case ICK_Function_Conversion:
+ // If both sides are functions (or pointers/references to them), there could
+ // be incompatible exception declarations.
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
+ VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ break;
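+
+  // (Editorial sketch, not part of the patch.) ICK_Function_Conversion
+  // models the C++17 noexcept-to-potentially-throwing adjustment, e.g.:
+  //   void f() noexcept;
+  //   void (*p)() = f; // OK: noexcept is dropped via a no-op cast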
+
case ICK_Qualification: {
// The qualification keeps the category of the inner expression, unless the
// target type isn't a reference.
@@ -3882,8 +4079,8 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
static bool HasNoThrowOperator(const RecordType *RT, OverloadedOperatorKind Op,
Sema &Self, SourceLocation KeyLoc, ASTContext &C,
- bool (CXXRecordDecl::*HasTrivial)() const,
- bool (CXXRecordDecl::*HasNonTrivial)() const,
+ bool (CXXRecordDecl::*HasTrivial)() const,
+ bool (CXXRecordDecl::*HasNonTrivial)() const,
bool (CXXMethodDecl::*IsDesiredOp)() const)
{
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
@@ -3979,7 +4176,7 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return false;
}
}
-
+
return T->isScalarType();
case UTT_IsCompound:
return T->isCompoundType();
@@ -4158,12 +4355,12 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
// false.
if (T.isPODType(C) || T->isReferenceType())
return true;
-
+
// Objective-C++ ARC: autorelease types don't require destruction.
- if (T->isObjCLifetimeType() &&
+ if (T->isObjCLifetimeType() &&
T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing)
return true;
-
+
if (CXXRecordDecl *RD = C.getBaseElementType(T)->getAsCXXRecordDecl())
return RD->hasTrivialDestructor();
return false;
@@ -4345,9 +4542,9 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
// definition for is_constructible, as defined below, is known to call
// no operation that is not trivial.
//
- // The predicate condition for a template specialization
- // is_constructible<T, Args...> shall be satisfied if and only if the
- // following variable definition would be well-formed for some invented
+ // The predicate condition for a template specialization
+ // is_constructible<T, Args...> shall be satisfied if and only if the
+ // following variable definition would be well-formed for some invented
// variable t:
//
// T t(create<Args>()...);
@@ -4361,7 +4558,7 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
if (ArgTy->isVoidType() || ArgTy->isIncompleteArrayType())
continue;
- if (S.RequireCompleteType(KWLoc, ArgTy,
+ if (S.RequireCompleteType(KWLoc, ArgTy,
diag::err_incomplete_type_used_in_type_trait_expr))
return false;
}
@@ -4391,7 +4588,7 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
for (Expr &E : OpaqueArgExprs)
ArgExprs.push_back(&E);
- // Perform the initialization in an unevaluated context within a SFINAE
+ // Perform the initialization in an unevaluated context within a SFINAE
// trap at translation unit scope.
EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true);
@@ -4430,12 +4627,12 @@ static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
}
default: llvm_unreachable("not a TT");
}
-
+
return false;
}
-ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
- ArrayRef<TypeSourceInfo *> Args,
+ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc) {
QualType ResultType = Context.getLogicalOperationType();
@@ -4464,14 +4661,14 @@ ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
SourceLocation RParenLoc) {
SmallVector<TypeSourceInfo *, 4> ConvertedArgs;
ConvertedArgs.reserve(Args.size());
-
+
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
TypeSourceInfo *TInfo;
QualType T = GetTypeFromParser(Args[I], &TInfo);
if (!TInfo)
TInfo = Context.getTrivialTypeSourceInfo(T, KWLoc);
-
- ConvertedArgs.push_back(TInfo);
+
+ ConvertedArgs.push_back(TInfo);
}
return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc);
@@ -4505,7 +4702,7 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
// If Base and Derived are class types and are different types
// (ignoring possible cv-qualifiers) then Derived shall be a
// complete type.
- if (Self.RequireCompleteType(KeyLoc, RhsT,
+ if (Self.RequireCompleteType(KeyLoc, RhsT,
diag::err_incomplete_type_used_in_type_trait_expr))
return false;
@@ -4522,21 +4719,21 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
// C++0x [meta.rel]p4:
// Given the following function prototype:
//
- // template <class T>
+ // template <class T>
// typename add_rvalue_reference<T>::type create();
//
- // the predicate condition for a template specialization
- // is_convertible<From, To> shall be satisfied if and only if
- // the return expression in the following code would be
+ // the predicate condition for a template specialization
+ // is_convertible<From, To> shall be satisfied if and only if
+ // the return expression in the following code would be
// well-formed, including any implicit conversions to the return
// type of the function:
//
- // To test() {
+ // To test() {
// return create<From>();
// }
//
- // Access checking is performed as if in a context unrelated to To and
- // From. Only the validity of the immediate context of the expression
+ // Access checking is performed as if in a context unrelated to To and
+ // From. Only the validity of the immediate context of the expression
// of the return-statement (including conversions to the return type)
// is considered.
//
@@ -4565,10 +4762,10 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
Expr::getValueKindForType(LhsT));
Expr *FromPtr = &From;
- InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc,
+ InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc,
SourceLocation()));
-
- // Perform the initialization in an unevaluated context within a SFINAE
+
+ // Perform the initialization in an unevaluated context within a SFINAE
// trap at translation unit scope.
EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
@@ -4590,17 +4787,17 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
// is_assignable, is known to call no operation that is not trivial
//
// is_assignable is defined as:
- // The expression declval<T>() = declval<U>() is well-formed when
+ // The expression declval<T>() = declval<U>() is well-formed when
// treated as an unevaluated operand (Clause 5).
//
- // For both, T and U shall be complete types, (possibly cv-qualified)
+ // For both, T and U shall be complete types, (possibly cv-qualified)
// void, or arrays of unknown bound.
if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType() &&
- Self.RequireCompleteType(KeyLoc, LhsT,
+ Self.RequireCompleteType(KeyLoc, LhsT,
diag::err_incomplete_type_used_in_type_trait_expr))
return false;
if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType() &&
- Self.RequireCompleteType(KeyLoc, RhsT,
+ Self.RequireCompleteType(KeyLoc, RhsT,
diag::err_incomplete_type_used_in_type_trait_expr))
return false;
@@ -4608,7 +4805,7 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
if (LhsT->isVoidType() || RhsT->isVoidType())
return false;
- // Build expressions that emulate the effect of declval<T>() and
+ // Build expressions that emulate the effect of declval<T>() and
// declval<U>().
if (LhsT->isObjectType() || LhsT->isFunctionType())
LhsT = Self.Context.getRValueReferenceType(LhsT);
@@ -4618,8 +4815,8 @@ static bool EvaluateBinaryTypeTrait(Sema &Self, TypeTrait BTT, QualType LhsT,
Expr::getValueKindForType(LhsT));
OpaqueValueExpr Rhs(KeyLoc, RhsT.getNonLValueExprType(Self.Context),
Expr::getValueKindForType(RhsT));
-
- // Attempt the assignment in an unevaluated context within a SFINAE
+
+ // Attempt the assignment in an unevaluated context within a SFINAE
// trap at translation unit scope.
EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
@@ -4789,11 +4986,14 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
!RHS.get()->getType()->isPlaceholderType() &&
"placeholders should have been weeded out by now");
- // The LHS undergoes lvalue conversions if this is ->*.
- if (isIndirect) {
+ // The LHS undergoes lvalue conversions if this is ->*, and undergoes the
+ // temporary materialization conversion otherwise.
+ if (isIndirect)
LHS = DefaultLvalueConversion(LHS.get());
- if (LHS.isInvalid()) return QualType();
- }
+ else if (LHS.get()->isRValue())
+ LHS = TemporaryMaterializationConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
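+
+  // (Editorial sketch, not part of the patch.) The materialization branch
+  // handles a prvalue left operand of .*, e.g.:
+  //   struct A { int m; };
+  //   int n = A().*&A::m; // A() is materialized into a temporary object
+  //                       // before the member is read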
// The RHS always undergoes lvalue conversions.
RHS = DefaultLvalueConversion(RHS.get());
@@ -5005,8 +5205,7 @@ static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
//
// This actually refers very narrowly to the lvalue-to-rvalue conversion, not
// to the array-to-pointer or function-to-pointer conversions.
- if (!TTy->getAs<TagType>())
- TTy = TTy.getUnqualifiedType();
+ TTy = TTy.getNonLValueExprType(Self.Context);
InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy);
InitializationSequence InitSeq(Self, Entity, Kind, From);
@@ -5052,7 +5251,7 @@ static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS
Self.MarkFunctionReferenced(QuestionLoc, Best->Function);
return false;
}
-
+
case OR_No_Viable_Function:
// Emit a better diagnostic if one of the expressions is a null pointer
@@ -5199,23 +5398,35 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// if both are glvalues of the same value category and the same type except
// for cv-qualification, an attempt is made to convert each of those
// operands to the type of the other.
+ // FIXME:
+ // Resolving a defect in P0012R1: we extend this to cover all cases where
+ // one of the operands is reference-compatible with the other, in order
+ // to support conditionals between functions differing in noexcept.
ExprValueKind LVK = LHS.get()->getValueKind();
ExprValueKind RVK = RHS.get()->getValueKind();
if (!Context.hasSameType(LTy, RTy) &&
- Context.hasSameUnqualifiedType(LTy, RTy) &&
LVK == RVK && LVK != VK_RValue) {
- // Since the unqualified types are reference-related and we require the
- // result to be as if a reference bound directly, the only conversion
- // we can perform is to add cv-qualifiers.
- Qualifiers LCVR = Qualifiers::fromCVRMask(LTy.getCVRQualifiers());
- Qualifiers RCVR = Qualifiers::fromCVRMask(RTy.getCVRQualifiers());
- if (RCVR.isStrictSupersetOf(LCVR)) {
- LHS = ImpCastExprToType(LHS.get(), RTy, CK_NoOp, LVK);
- LTy = LHS.get()->getType();
- }
- else if (LCVR.isStrictSupersetOf(RCVR)) {
+ // DerivedToBase was already handled by the class-specific case above.
+ // FIXME: Should we allow ObjC conversions here?
+ bool DerivedToBase, ObjCConversion, ObjCLifetimeConversion;
+ if (CompareReferenceRelationship(
+ QuestionLoc, LTy, RTy, DerivedToBase,
+ ObjCConversion, ObjCLifetimeConversion) == Ref_Compatible &&
+ !DerivedToBase && !ObjCConversion && !ObjCLifetimeConversion &&
+ // [...] subject to the constraint that the reference must bind
+ // directly [...]
+ !RHS.get()->refersToBitField() &&
+ !RHS.get()->refersToVectorElement()) {
RHS = ImpCastExprToType(RHS.get(), LTy, CK_NoOp, RVK);
RTy = RHS.get()->getType();
+ } else if (CompareReferenceRelationship(
+ QuestionLoc, RTy, LTy, DerivedToBase,
+ ObjCConversion, ObjCLifetimeConversion) == Ref_Compatible &&
+ !DerivedToBase && !ObjCConversion && !ObjCLifetimeConversion &&
+ !LHS.get()->refersToBitField() &&
+ !LHS.get()->refersToVectorElement()) {
+ LHS = ImpCastExprToType(LHS.get(), RTy, CK_NoOp, LVK);
+ LTy = LHS.get()->getType();
}
}
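+
+  // (Editorial sketch, not part of the patch.) This accepts, for example, a
+  // conditional between lvalues that differ only in noexcept:
+  //   void f() noexcept;
+  //   void g();
+  //   extern bool b;
+  //   (b ? f : g)(); // f converts to the type of g; the result is an
+  //                  // lvalue of type void ()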
@@ -5234,6 +5445,20 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (LHS.get()->getObjectKind() == OK_BitField ||
RHS.get()->getObjectKind() == OK_BitField)
OK = OK_BitField;
+
+ // If we have function pointer types, unify them anyway to unify their
+ // exception specifications, if any.
+ if (LTy->isFunctionPointerType() || LTy->isMemberFunctionPointerType()) {
+ Qualifiers Qs = LTy.getQualifiers();
+ LTy = FindCompositePointerType(QuestionLoc, LHS, RHS,
+ /*ConvertArgs*/false);
+ LTy = Context.getQualifiedType(LTy, Qs);
+
+ assert(!LTy.isNull() && "failed to find composite pointer type for "
+ "canonically equivalent function ptr types");
+ assert(Context.hasSameType(LTy, RTy) && "bad composite pointer type");
+ }
+
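+  // (Editorial sketch, not part of the patch.) This unifies canonically
+  // equivalent function pointer types whose exception specifications
+  // differ, e.g. void (*)() noexcept vs. void (*)(), yielding the
+  // composite type void (*)().
+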
return LTy;
}
@@ -5267,9 +5492,6 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) {
if (LTy->isRecordType()) {
// The operands have class type. Make a temporary copy.
- if (RequireNonAbstractType(QuestionLoc, LTy,
- diag::err_allocation_of_abstract_type))
- return QualType();
InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy);
ExprResult LHSCopy = PerformCopyInitialization(Entity,
@@ -5288,6 +5510,14 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
RHS = RHSCopy;
}
+ // If we have function pointer types, unify them anyway to unify their
+ // exception specifications, if any.
+ if (LTy->isFunctionPointerType() || LTy->isMemberFunctionPointerType()) {
+ LTy = FindCompositePointerType(QuestionLoc, LHS, RHS);
+ assert(!LTy.isNull() && "failed to find composite pointer type for "
+ "canonically equivalent function ptr types");
+ }
+
return LTy;
}
@@ -5329,19 +5559,9 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// performed to bring them to a common type, whose cv-qualification
// shall match the cv-qualification of either the second or the third
// operand. The result is of the common type.
- bool NonStandardCompositeType = false;
- QualType Composite = FindCompositePointerType(QuestionLoc, LHS, RHS,
- isSFINAEContext() ? nullptr
- : &NonStandardCompositeType);
- if (!Composite.isNull()) {
- if (NonStandardCompositeType)
- Diag(QuestionLoc,
- diag::ext_typecheck_cond_incompatible_operands_nonstandard)
- << LTy << RTy << Composite
- << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
-
+ QualType Composite = FindCompositePointerType(QuestionLoc, LHS, RHS);
+ if (!Composite.isNull())
return Composite;
- }
// Similarly, attempt to find composite type of two objective-c pointers.
Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
@@ -5358,90 +5578,176 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
return QualType();
}
+static FunctionProtoType::ExceptionSpecInfo
+mergeExceptionSpecs(Sema &S, FunctionProtoType::ExceptionSpecInfo ESI1,
+ FunctionProtoType::ExceptionSpecInfo ESI2,
+ SmallVectorImpl<QualType> &ExceptionTypeStorage) {
+ ExceptionSpecificationType EST1 = ESI1.Type;
+ ExceptionSpecificationType EST2 = ESI2.Type;
+
+ // If either of them can throw anything, that is the result.
+ if (EST1 == EST_None) return ESI1;
+ if (EST2 == EST_None) return ESI2;
+ if (EST1 == EST_MSAny) return ESI1;
+ if (EST2 == EST_MSAny) return ESI2;
+
+ // If either of them is non-throwing, the result is the other.
+ if (EST1 == EST_DynamicNone) return ESI2;
+ if (EST2 == EST_DynamicNone) return ESI1;
+ if (EST1 == EST_BasicNoexcept) return ESI2;
+ if (EST2 == EST_BasicNoexcept) return ESI1;
+
+ // If either of them is a non-value-dependent computed noexcept, that
+ // determines the result.
+ if (EST2 == EST_ComputedNoexcept && ESI2.NoexceptExpr &&
+ !ESI2.NoexceptExpr->isValueDependent())
+ return !ESI2.NoexceptExpr->EvaluateKnownConstInt(S.Context) ? ESI2 : ESI1;
+ if (EST1 == EST_ComputedNoexcept && ESI1.NoexceptExpr &&
+ !ESI1.NoexceptExpr->isValueDependent())
+ return !ESI1.NoexceptExpr->EvaluateKnownConstInt(S.Context) ? ESI1 : ESI2;
+ // If we're left with value-dependent computed noexcept expressions, we're
+ // stuck. Before C++17, we can just drop the exception specification entirely,
+ // since it's not actually part of the canonical type. And this should never
+ // happen in C++17, because it would mean we were computing the composite
+ // pointer type of dependent types, which should never happen.
+ if (EST1 == EST_ComputedNoexcept || EST2 == EST_ComputedNoexcept) {
+ assert(!S.getLangOpts().CPlusPlus1z &&
+ "computing composite pointer type of dependent types");
+ return FunctionProtoType::ExceptionSpecInfo();
+ }
+
+ // Switch over the possibilities so that people adding new values know to
+ // update this function.
+ switch (EST1) {
+ case EST_None:
+ case EST_DynamicNone:
+ case EST_MSAny:
+ case EST_BasicNoexcept:
+ case EST_ComputedNoexcept:
+ llvm_unreachable("handled above");
+
+ case EST_Dynamic: {
+ // This is the fun case: both exception specifications are dynamic. Form
+ // the union of the two lists.
+ assert(EST2 == EST_Dynamic && "other cases should already be handled");
+ llvm::SmallPtrSet<QualType, 8> Found;
+ for (auto &Exceptions : {ESI1.Exceptions, ESI2.Exceptions})
+ for (QualType E : Exceptions)
+ if (Found.insert(S.Context.getCanonicalType(E)).second)
+ ExceptionTypeStorage.push_back(E);
+
+ FunctionProtoType::ExceptionSpecInfo Result(EST_Dynamic);
+ Result.Exceptions = ExceptionTypeStorage;
+ return Result;
+ }
+
+ case EST_Unevaluated:
+ case EST_Uninstantiated:
+ case EST_Unparsed:
+ llvm_unreachable("shouldn't see unresolved exception specifications here");
+ }
+
+ llvm_unreachable("invalid ExceptionSpecificationType");
+}
+
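A sketch of the merging rules above (an editorial illustration, not part of the patch), assuming a dialect that still accepts dynamic exception specifications:

void a() throw(int);
void b() throw(float);
void c() noexcept;
void d();

// throw(int) merged with throw(float) -> throw(int, float)  (union of lists)
// noexcept merged with no spec        -> potentially throwing (EST_None wins)
// noexcept merged with throw()        -> non-throwing (both are non-throwing)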
/// \brief Find a merged pointer type and convert the two expressions to it.
///
/// This finds the composite pointer type (or member pointer type) for @p E1
-/// and @p E2 according to C++11 5.9p2. It converts both expressions to this
+/// and @p E2 according to C++1z 5p14. It converts both expressions to this
/// type and returns it.
/// It does not emit diagnostics.
///
/// \param Loc The location of the operator requiring these two expressions to
/// be converted to the composite pointer type.
///
-/// If \p NonStandardCompositeType is non-NULL, then we are permitted to find
-/// a non-standard (but still sane) composite type to which both expressions
-/// can be converted. When such a type is chosen, \c *NonStandardCompositeType
-/// will be set true.
+/// \param ConvertArgs If \c false, do not convert E1 and E2 to the target type.
QualType Sema::FindCompositePointerType(SourceLocation Loc,
Expr *&E1, Expr *&E2,
- bool *NonStandardCompositeType) {
- if (NonStandardCompositeType)
- *NonStandardCompositeType = false;
-
+ bool ConvertArgs) {
assert(getLangOpts().CPlusPlus && "This function assumes C++");
+
+ // C++1z [expr]p14:
+ // The composite pointer type of two operands p1 and p2 having types T1
+ // and T2
QualType T1 = E1->getType(), T2 = E2->getType();
- // C++11 5.9p2
- // Pointer conversions and qualification conversions are performed on
- // pointer operands to bring them to their composite pointer type. If
- // one operand is a null pointer constant, the composite pointer type is
- // std::nullptr_t if the other operand is also a null pointer constant or,
- // if the other operand is a pointer, the type of the other operand.
- if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
- !T2->isAnyPointerType() && !T2->isMemberPointerType()) {
- if (T1->isNullPtrType() &&
- E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
- E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
- return T1;
- }
- if (T2->isNullPtrType() &&
- E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
- E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
- return T2;
- }
+ // where at least one is a pointer or pointer to member type or
+ // std::nullptr_t is:
+ bool T1IsPointerLike = T1->isAnyPointerType() || T1->isMemberPointerType() ||
+ T1->isNullPtrType();
+ bool T2IsPointerLike = T2->isAnyPointerType() || T2->isMemberPointerType() ||
+ T2->isNullPtrType();
+ if (!T1IsPointerLike && !T2IsPointerLike)
return QualType();
- }
- if (E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
- if (T2->isMemberPointerType())
- E1 = ImpCastExprToType(E1, T2, CK_NullToMemberPointer).get();
- else
- E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).get();
- return T2;
- }
- if (E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
- if (T1->isMemberPointerType())
- E2 = ImpCastExprToType(E2, T1, CK_NullToMemberPointer).get();
- else
- E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).get();
+ // - if both p1 and p2 are null pointer constants, std::nullptr_t;
+ // This can't actually happen, following the standard, but we also use this
+ // to implement the end of [expr.conv], which hits this case.
+ //
+ // - if either p1 or p2 is a null pointer constant, T2 or T1, respectively;
+ if (T1IsPointerLike &&
+ E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ if (ConvertArgs)
+ E2 = ImpCastExprToType(E2, T1, T1->isMemberPointerType()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer).get();
return T1;
}
+ if (T2IsPointerLike &&
+ E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ if (ConvertArgs)
+ E1 = ImpCastExprToType(E1, T2, T2->isMemberPointerType()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer).get();
+ return T2;
+ }
// Now both have to be pointers or member pointers.
- if ((!T1->isPointerType() && !T1->isMemberPointerType()) ||
- (!T2->isPointerType() && !T2->isMemberPointerType()))
+ if (!T1IsPointerLike || !T2IsPointerLike)
return QualType();
-
- // Otherwise, of one of the operands has type "pointer to cv1 void," then
- // the other has type "pointer to cv2 T" and the composite pointer type is
- // "pointer to cv12 void," where cv12 is the union of cv1 and cv2.
- // Otherwise, the composite pointer type is a pointer type similar to the
- // type of one of the operands, with a cv-qualification signature that is
- // the union of the cv-qualification signatures of the operand types.
- // In practice, the first part here is redundant; it's subsumed by the second.
- // What we do here is, we build the two possible composite types, and try the
- // conversions in both directions. If only one works, or if the two composite
- // types are the same, we have succeeded.
+ assert(!T1->isNullPtrType() && !T2->isNullPtrType() &&
+ "nullptr_t should be a null pointer constant");
+
+ // - if T1 or T2 is "pointer to cv1 void" and the other type is
+ // "pointer to cv2 T", "pointer to cv12 void", where cv12 is
+ // the union of cv1 and cv2;
+ // - if T1 or T2 is "pointer to noexcept function" and the other type is
+ // "pointer to function", where the function types are otherwise the same,
+ // "pointer to function";
+ // FIXME: This rule is defective: it should also permit removing noexcept
+ // from a pointer to member function. As a Clang extension, we also
+  //   permit removing 'noreturn', so we generalize this rule to:
+ // - [Clang] If T1 and T2 are both of type "pointer to function" or
+ // "pointer to member function" and the pointee types can be unified
+ // by a function pointer conversion, that conversion is applied
+ // before checking the following rules.
+ // - if T1 is "pointer to cv1 C1" and T2 is "pointer to cv2 C2", where C1
+ // is reference-related to C2 or C2 is reference-related to C1 (8.6.3),
+ // the cv-combined type of T1 and T2 or the cv-combined type of T2 and T1,
+ // respectively;
+ // - if T1 is "pointer to member of C1 of type cv1 U1" and T2 is "pointer
+ // to member of C2 of type cv2 U2" where C1 is reference-related to C2 or
+ // C2 is reference-related to C1 (8.6.3), the cv-combined type of T2 and
+ // T1 or the cv-combined type of T1 and T2, respectively;
+ // - if T1 and T2 are similar types (4.5), the cv-combined type of T1 and
+ // T2;
+ //
+ // If looked at in the right way, these bullets all do the same thing.
+ // What we do here is, we build the two possible cv-combined types, and try
+ // the conversions in both directions. If only one works, or if the two
+ // composite types are the same, we have succeeded.
// FIXME: extended qualifiers?
- typedef SmallVector<unsigned, 4> QualifierVector;
- QualifierVector QualifierUnion;
- typedef SmallVector<std::pair<const Type *, const Type *>, 4>
- ContainingClassVector;
- ContainingClassVector MemberOfClass;
- QualType Composite1 = Context.getCanonicalType(T1),
- Composite2 = Context.getCanonicalType(T2);
+ //
+ // Note that this will fail to find a composite pointer type for "pointer
+ // to void" and "pointer to function". We can't actually perform the final
+ // conversion in this case, even though a composite pointer type formally
+ // exists.
+ SmallVector<unsigned, 4> QualifierUnion;
+ SmallVector<std::pair<const Type *, const Type *>, 4> MemberOfClass;
+ QualType Composite1 = T1;
+ QualType Composite2 = T2;
unsigned NeedConstBefore = 0;
- do {
+ while (true) {
const PointerType *Ptr1, *Ptr2;
if ((Ptr1 = Composite1->getAs<PointerType>()) &&
(Ptr2 = Composite2->getAs<PointerType>())) {
@@ -5450,8 +5756,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
// If we're allowed to create a non-standard composite type, keep track
// of where we need to fill in additional 'const' qualifiers.
- if (NonStandardCompositeType &&
- Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
+ if (Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
NeedConstBefore = QualifierUnion.size();
QualifierUnion.push_back(
@@ -5468,8 +5773,7 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
// If we're allowed to create a non-standard composite type, keep track
// of where we need to fill in additional 'const' qualifiers.
- if (NonStandardCompositeType &&
- Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
+ if (Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
NeedConstBefore = QualifierUnion.size();
QualifierUnion.push_back(
@@ -5483,109 +5787,125 @@ QualType Sema::FindCompositePointerType(SourceLocation Loc,
// Cannot unwrap any more types.
break;
- } while (true);
+ }
- if (NeedConstBefore && NonStandardCompositeType) {
+ // Apply the function pointer conversion to unify the types. We've already
+ // unwrapped down to the function types, and we want to merge rather than
+ // just convert, so do this ourselves rather than calling
+ // IsFunctionConversion.
+ //
+ // FIXME: In order to match the standard wording as closely as possible, we
+ // currently only do this under a single level of pointers. Ideally, we would
+ // allow this in general, and set NeedConstBefore to the relevant depth on
+ // the side(s) where we changed anything.
+ if (QualifierUnion.size() == 1) {
+ if (auto *FPT1 = Composite1->getAs<FunctionProtoType>()) {
+ if (auto *FPT2 = Composite2->getAs<FunctionProtoType>()) {
+ FunctionProtoType::ExtProtoInfo EPI1 = FPT1->getExtProtoInfo();
+ FunctionProtoType::ExtProtoInfo EPI2 = FPT2->getExtProtoInfo();
+
+ // The result is noreturn if both operands are.
+ bool Noreturn =
+ EPI1.ExtInfo.getNoReturn() && EPI2.ExtInfo.getNoReturn();
+ EPI1.ExtInfo = EPI1.ExtInfo.withNoReturn(Noreturn);
+ EPI2.ExtInfo = EPI2.ExtInfo.withNoReturn(Noreturn);
+
+ // The result is nothrow if both operands are.
+ SmallVector<QualType, 8> ExceptionTypeStorage;
+ EPI1.ExceptionSpec = EPI2.ExceptionSpec =
+ mergeExceptionSpecs(*this, EPI1.ExceptionSpec, EPI2.ExceptionSpec,
+ ExceptionTypeStorage);
+
+ Composite1 = Context.getFunctionType(FPT1->getReturnType(),
+ FPT1->getParamTypes(), EPI1);
+ Composite2 = Context.getFunctionType(FPT2->getReturnType(),
+ FPT2->getParamTypes(), EPI2);
+ }
+ }
+ }
+
+ if (NeedConstBefore) {
// Extension: Add 'const' to qualifiers that come before the first qualifier
// mismatch, so that our (non-standard!) composite type meets the
// requirements of C++ [conv.qual]p4 bullet 3.
- for (unsigned I = 0; I != NeedConstBefore; ++I) {
- if ((QualifierUnion[I] & Qualifiers::Const) == 0) {
+ for (unsigned I = 0; I != NeedConstBefore; ++I)
+ if ((QualifierUnion[I] & Qualifiers::Const) == 0)
QualifierUnion[I] = QualifierUnion[I] | Qualifiers::Const;
- *NonStandardCompositeType = true;
- }
- }
}
// Rewrap the composites as pointers or member pointers with the union CVRs.
- ContainingClassVector::reverse_iterator MOC
- = MemberOfClass.rbegin();
- for (QualifierVector::reverse_iterator
- I = QualifierUnion.rbegin(),
- E = QualifierUnion.rend();
- I != E; (void)++I, ++MOC) {
- Qualifiers Quals = Qualifiers::fromCVRMask(*I);
- if (MOC->first && MOC->second) {
+ auto MOC = MemberOfClass.rbegin();
+ for (unsigned CVR : llvm::reverse(QualifierUnion)) {
+ Qualifiers Quals = Qualifiers::fromCVRMask(CVR);
+ auto Classes = *MOC++;
+ if (Classes.first && Classes.second) {
// Rebuild member pointer type
Composite1 = Context.getMemberPointerType(
- Context.getQualifiedType(Composite1, Quals),
- MOC->first);
+ Context.getQualifiedType(Composite1, Quals), Classes.first);
Composite2 = Context.getMemberPointerType(
- Context.getQualifiedType(Composite2, Quals),
- MOC->second);
+ Context.getQualifiedType(Composite2, Quals), Classes.second);
} else {
// Rebuild pointer type
- Composite1
- = Context.getPointerType(Context.getQualifiedType(Composite1, Quals));
- Composite2
- = Context.getPointerType(Context.getQualifiedType(Composite2, Quals));
+ Composite1 =
+ Context.getPointerType(Context.getQualifiedType(Composite1, Quals));
+ Composite2 =
+ Context.getPointerType(Context.getQualifiedType(Composite2, Quals));
}
}
- // Try to convert to the first composite pointer type.
- InitializedEntity Entity1
- = InitializedEntity::InitializeTemporary(Composite1);
- InitializationKind Kind
- = InitializationKind::CreateCopy(Loc, SourceLocation());
- InitializationSequence E1ToC1(*this, Entity1, Kind, E1);
- InitializationSequence E2ToC1(*this, Entity1, Kind, E2);
-
- if (E1ToC1 && E2ToC1) {
- // Conversion to Composite1 is viable.
- if (!Context.hasSameType(Composite1, Composite2)) {
- // Composite2 is a different type from Composite1. Check whether
- // Composite2 is also viable.
- InitializedEntity Entity2
- = InitializedEntity::InitializeTemporary(Composite2);
- InitializationSequence E1ToC2(*this, Entity2, Kind, E1);
- InitializationSequence E2ToC2(*this, Entity2, Kind, E2);
- if (E1ToC2 && E2ToC2) {
- // Both Composite1 and Composite2 are viable and are different;
- // this is an ambiguity.
- return QualType();
- }
- }
+ struct Conversion {
+ Sema &S;
+ Expr *&E1, *&E2;
+ QualType Composite;
+ InitializedEntity Entity;
+ InitializationKind Kind;
+ InitializationSequence E1ToC, E2ToC;
+ bool Viable;
+
+ Conversion(Sema &S, SourceLocation Loc, Expr *&E1, Expr *&E2,
+ QualType Composite)
+ : S(S), E1(E1), E2(E2), Composite(Composite),
+ Entity(InitializedEntity::InitializeTemporary(Composite)),
+ Kind(InitializationKind::CreateCopy(Loc, SourceLocation())),
+ E1ToC(S, Entity, Kind, E1), E2ToC(S, Entity, Kind, E2),
+ Viable(E1ToC && E2ToC) {}
+
+ bool perform() {
+ ExprResult E1Result = E1ToC.Perform(S, Entity, Kind, E1);
+ if (E1Result.isInvalid())
+ return true;
+ E1 = E1Result.getAs<Expr>();
- // Convert E1 to Composite1
- ExprResult E1Result
- = E1ToC1.Perform(*this, Entity1, Kind, E1);
- if (E1Result.isInvalid())
- return QualType();
- E1 = E1Result.getAs<Expr>();
+ ExprResult E2Result = E2ToC.Perform(S, Entity, Kind, E2);
+ if (E2Result.isInvalid())
+ return true;
+ E2 = E2Result.getAs<Expr>();
- // Convert E2 to Composite1
- ExprResult E2Result
- = E2ToC1.Perform(*this, Entity1, Kind, E2);
- if (E2Result.isInvalid())
- return QualType();
- E2 = E2Result.getAs<Expr>();
+ return false;
+ }
+ };
- return Composite1;
+ // Try to convert to each composite pointer type.
+ Conversion C1(*this, Loc, E1, E2, Composite1);
+ if (C1.Viable && Context.hasSameType(Composite1, Composite2)) {
+ if (ConvertArgs && C1.perform())
+ return QualType();
+ return C1.Composite;
}
+ Conversion C2(*this, Loc, E1, E2, Composite2);
- // Check whether Composite2 is viable.
- InitializedEntity Entity2
- = InitializedEntity::InitializeTemporary(Composite2);
- InitializationSequence E1ToC2(*this, Entity2, Kind, E1);
- InitializationSequence E2ToC2(*this, Entity2, Kind, E2);
- if (!E1ToC2 || !E2ToC2)
+ if (C1.Viable == C2.Viable) {
+ // Either Composite1 and Composite2 are viable and are different, or
+ // neither is viable.
+    // FIXME: How can both be viable and different?
return QualType();
+ }
- // Convert E1 to Composite2
- ExprResult E1Result
- = E1ToC2.Perform(*this, Entity2, Kind, E1);
- if (E1Result.isInvalid())
- return QualType();
- E1 = E1Result.getAs<Expr>();
-
- // Convert E2 to Composite2
- ExprResult E2Result
- = E2ToC2.Perform(*this, Entity2, Kind, E2);
- if (E2Result.isInvalid())
+ // Convert to the chosen type.
+ if (ConvertArgs && (C1.Viable ? C1 : C2).perform())
return QualType();
- E2 = E2Result.getAs<Expr>();
- return Composite2;
+ return C1.Viable ? C1.Composite : C2.Composite;
}
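A few illustrative composite pointer types under the bullets above (an editorial sketch, not part of the patch):

struct B {};
struct D : B {};

void f(bool c, D *pd, B *pb, const int **pcc, int **pii) {
  auto r1 = c ? pd : pb;    // B* : D is reference-related to B
  auto r2 = c ? pcc : pii;  // const int *const * : the qualifier union needs
                            //   'const' at the outer level too ([conv.qual]);
                            //   this is what NeedConstBefore implements
}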
ExprResult Sema::MaybeBindToTemporary(Expr *E) {
@@ -5618,14 +5938,14 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Callee))
T = Mem->getMemberDecl()->getType();
}
-
+
if (const PointerType *Ptr = T->getAs<PointerType>())
T = Ptr->getPointeeType();
else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
T = Ptr->getPointeeType();
else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
T = MemPtr->getPointeeType();
-
+
const FunctionType *FTy = T->getAs<FunctionType>();
assert(FTy && "call to value not of function type?");
ReturnsRetained = FTy->getExtInfo().getProducesResult();
@@ -6012,7 +6332,7 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
// so adjust the base type to the object type itself.
if (BaseType->isObjCObjectPointerType())
BaseType = BaseType->getPointeeType();
-
+
// C++ [basic.lookup.classref]p2:
// [...] If the type of the object expression is of pointer to scalar
// type, the unqualified-id is looked up in the context of the complete
@@ -6037,7 +6357,7 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
// The object type must be complete (or dependent), or
// C++11 [expr.prim.general]p3:
// Unlike the object expression in other contexts, *this is not required to
- // be of complete type for purposes of class member access (5.2.5) outside
+ // be of complete type for purposes of class member access (5.2.5) outside
// the member function body.
if (!BaseType->isDependentType() &&
!isThisOutsideMemberFunctionBody(BaseType) &&
@@ -6053,7 +6373,7 @@ ExprResult Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base,
return Base;
}
-static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
+static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
tok::TokenKind& OpKind, SourceLocation OpLoc) {
if (Base->hasPlaceholderType()) {
ExprResult result = S.CheckPlaceholderExpr(Base);
@@ -6129,9 +6449,9 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
DestructedTypeStart);
Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
- } else if (DestructedType.getObjCLifetime() !=
+ } else if (DestructedType.getObjCLifetime() !=
ObjectType.getObjCLifetime()) {
-
+
if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
// Okay: just pretend that the user provided the correctly-qualified
// type.
@@ -6140,7 +6460,7 @@ ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
<< ObjectType << DestructedType << Base->getSourceRange()
<< DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
}
-
+
// Recover by setting the destructed type to the object type.
DestructedType = ObjectType;
DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
@@ -6324,7 +6644,7 @@ ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
- SourceLocation TildeLoc,
+ SourceLocation TildeLoc,
const DeclSpec& DS) {
QualType ObjectType;
if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
@@ -6519,7 +6839,17 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
if (Res.isInvalid())
return E;
E = Res.get();
- }
+ }
+
+ // C++1z:
+ // If the expression is a prvalue after this optional conversion, the
+ // temporary materialization conversion is applied.
+ //
+ // We skip this step: IR generation is able to synthesize the storage for
+ // itself in the aggregate case, and adding the extra node to the AST is
+ // just clutter.
+ // FIXME: We don't emit lifetime markers for the temporaries due to this.
+ // FIXME: Do any other AST consumers care about this?
return E;
}
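As an illustration of the skipped materialization (an editorial sketch, not part of the patch):

struct S { int a[8]; };
S make();

void f() {
  make();  // discarded-value expression: C++1z formally materializes a
           // temporary here, but no AST node is added; IR generation
           // synthesizes the storage itself
}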
@@ -6549,13 +6879,13 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
// we can unambiguously check if the variable is a constant expression.
// - if the initializer is not value dependent - we can determine whether
// it can be used to initialize a constant expression. If Init can not
-// be used to initialize a constant expression we conclude that Var can
+// be used to initialize a constant expression we conclude that Var can
// never be a constant expression.
// - FIXME: if the initializer is dependent, we can still do some analysis and
// identify certain cases unambiguously as non-const by using a Visitor:
// - such as those that involve odr-use of a ParmVarDecl, involve a new
// delete, lambda-expr, dynamic-cast, reinterpret-cast etc...
-static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
+static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
ASTContext &Context) {
if (isa<ParmVarDecl>(Var)) return true;
const VarDecl *DefVD = nullptr;
@@ -6575,14 +6905,14 @@ static inline bool VariableCanNeverBeAConstantExpression(VarDecl *Var,
return false;
}
- return !IsVariableAConstantExpression(Var, Context);
+ return !IsVariableAConstantExpression(Var, Context);
}
-/// \brief Check if the current lambda has any potential captures
-/// that must be captured by any of its enclosing lambdas that are ready to
-/// capture. If there is a lambda that can capture a nested
-/// potential-capture, go ahead and do so. Also, check to see if any
-/// variables are uncaptureable or do not involve an odr-use so do not
+/// \brief Check if the current lambda has any potential captures
+/// that must be captured by any of its enclosing lambdas that are ready to
+/// capture. If there is a lambda that can capture a nested
+/// potential-capture, go ahead and do so. Also, check to see if any
+/// variables are uncaptureable or do not involve an odr-use so do not
/// need to be captured.
static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
@@ -6603,7 +6933,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
ArrayRef<const FunctionScopeInfo *> FunctionScopesArrayRef(
S.FunctionScopes.data(), S.FunctionScopes.size());
-
+
// All the potentially captureable variables in the current nested
// lambda (within a generic outer lambda), must be captured by an
// outer lambda that is enclosed within a non-dependent context.
@@ -6614,7 +6944,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
VarDecl *Var = nullptr;
CurrentLSI->getPotentialVariableCapture(I, Var, VarExpr);
// If the variable is clearly identified as non-odr-used and the full
- // expression is not instantiation dependent, only then do we not
+ // expression is not instantiation dependent, only then do we not
// need to check enclosing lambda's for speculative captures.
// For e.g.:
// Even though 'x' is not odr-used, it should be captured.
@@ -6636,27 +6966,27 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
MarkVarDeclODRUsed(Var, VarExpr->getExprLoc(), S,
&FunctionScopeIndexOfCapturableLambda);
- }
- const bool IsVarNeverAConstantExpression =
+ }
+ const bool IsVarNeverAConstantExpression =
VariableCanNeverBeAConstantExpression(Var, S.Context);
if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
// This full expression is not instantiation dependent or the variable
- // can not be used in a constant expression - which means
- // this variable must be odr-used here, so diagnose a
+ // can not be used in a constant expression - which means
+ // this variable must be odr-used here, so diagnose a
// capture violation early, if the variable is un-captureable.
// This is purely for diagnosing errors early. Otherwise, this
// error would get diagnosed when the lambda becomes capture ready.
QualType CaptureType, DeclRefType;
SourceLocation ExprLoc = VarExpr->getExprLoc();
if (S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit,
- /*EllipsisLoc*/ SourceLocation(),
- /*BuildAndDiagnose*/false, CaptureType,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/false, CaptureType,
DeclRefType, nullptr)) {
// We will never be able to capture this variable, and we need
// to be able to in any and all instantiations, so diagnose it.
S.tryCaptureVariable(Var, ExprLoc, S.TryCapture_Implicit,
- /*EllipsisLoc*/ SourceLocation(),
- /*BuildAndDiagnose*/true, CaptureType,
+ /*EllipsisLoc*/ SourceLocation(),
+ /*BuildAndDiagnose*/true, CaptureType,
DeclRefType, nullptr);
}
}
@@ -6983,15 +7313,15 @@ Sema::CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl,
ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
bool DiscardedValue,
- bool IsConstexpr,
+ bool IsConstexpr,
bool IsLambdaInitCaptureInitializer) {
ExprResult FullExpr = FE;
if (!FullExpr.get())
return ExprError();
-
- // If we are an init-expression in a lambdas init-capture, we should not
- // diagnose an unexpanded pack now (will be diagnosed once lambda-expr
+
+  // If we are an init-expression in a lambda's init-capture, we should not
+ // diagnose an unexpanded pack now (will be diagnosed once lambda-expr
// containing full-expression is done).
// template<class ... Ts> void test(Ts ... t) {
// test([&a(t)]() { <-- (t) is an init-expr that shouldn't be diagnosed now.
@@ -7005,7 +7335,7 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
// lambda where we've entered the introducer but not the body, or represent a
// lambda where we've entered the body, depending on where the
// parser/instantiation has got to).
- if (!IsLambdaInitCaptureInitializer &&
+ if (!IsLambdaInitCaptureInitializer &&
DiagnoseUnexpandedParameterPack(FullExpr.get()))
return ExprError();
@@ -7033,13 +7363,13 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
CheckCompletedExpr(FullExpr.get(), CC, IsConstexpr);
- // At the end of this full expression (which could be a deeply nested
- // lambda), if there is a potential capture within the nested lambda,
+ // At the end of this full expression (which could be a deeply nested
+ // lambda), if there is a potential capture within the nested lambda,
// have the outer capture-able lambda try and capture it.
// Consider the following code:
// void f(int, int);
// void f(const int&, double);
- // void foo() {
+ // void foo() {
// const int x = 10, y = 20;
// auto L = [=](auto a) {
// auto M = [=](auto b) {
@@ -7049,35 +7379,35 @@ ExprResult Sema::ActOnFinishFullExpr(Expr *FE, SourceLocation CC,
// };
// }
- // FIXME: Also consider what happens for something like this that involves
- // the gnu-extension statement-expressions or even lambda-init-captures:
+ // FIXME: Also consider what happens for something like this that involves
+ // the gnu-extension statement-expressions or even lambda-init-captures:
// void f() {
// const int n = 0;
// auto L = [&](auto a) {
// +n + ({ 0; a; });
// };
// }
- //
- // Here, we see +n, and then the full-expression 0; ends, so we don't
- // capture n (and instead remove it from our list of potential captures),
- // and then the full-expression +n + ({ 0; }); ends, but it's too late
+ //
+ // Here, we see +n, and then the full-expression 0; ends, so we don't
+ // capture n (and instead remove it from our list of potential captures),
+ // and then the full-expression +n + ({ 0; }); ends, but it's too late
// for us to see that we need to capture n after all.
LambdaScopeInfo *const CurrentLSI =
getCurLambda(/*IgnoreCapturedRegions=*/true);
- // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer
+ // FIXME: PR 17877 showed that getCurLambda() can return a valid pointer
// even if CurContext is not a lambda call operator. Refer to that Bug Report
- // for an example of the code that might cause this asynchrony.
+ // for an example of the code that might cause this asynchrony.
// By ensuring we are in the context of a lambda's call operator
// we can fix the bug (we only need to check whether we need to capture
- // if we are within a lambda's body); but per the comments in that
+ // if we are within a lambda's body); but per the comments in that
// PR, a proper fix would entail :
// "Alternative suggestion:
- // - Add to Sema an integer holding the smallest (outermost) scope
- // index that we are *lexically* within, and save/restore/set to
- // FunctionScopes.size() in InstantiatingTemplate's
+ // - Add to Sema an integer holding the smallest (outermost) scope
+ // index that we are *lexically* within, and save/restore/set to
+ // FunctionScopes.size() in InstantiatingTemplate's
// constructor/destructor.
- // - Teach the handful of places that iterate over FunctionScopes to
+ // - Teach the handful of places that iterate over FunctionScopes to
// stop at the outermost enclosing lexical scope."
DeclContext *DC = CurContext;
while (DC && isa<CapturedDecl>(DC))
@@ -7096,34 +7426,34 @@ StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
return MaybeCreateStmtWithCleanups(FullStmt);
}
-Sema::IfExistsResult
+Sema::IfExistsResult
Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo) {
DeclarationName TargetName = TargetNameInfo.getName();
if (!TargetName)
return IER_DoesNotExist;
-
+
// If the name itself is dependent, then the result is dependent.
if (TargetName.isDependentName())
return IER_Dependent;
-
+
// Do the redeclaration lookup in the current scope.
LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
Sema::NotForRedeclaration);
LookupParsedName(R, S, &SS);
R.suppressDiagnostics();
-
+
switch (R.getResultKind()) {
case LookupResult::Found:
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue:
case LookupResult::Ambiguous:
return IER_Exists;
-
+
case LookupResult::NotFound:
return IER_DoesNotExist;
-
+
case LookupResult::NotFoundInCurrentInstantiation:
return IER_Dependent;
}
@@ -7131,23 +7461,17 @@ Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
llvm_unreachable("Invalid LookupResult Kind!");
}
-Sema::IfExistsResult
+Sema::IfExistsResult
Sema::CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name) {
DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
-
- // Check for unexpanded parameter packs.
- SmallVector<UnexpandedParameterPack, 4> Unexpanded;
- collectUnexpandedParameterPacks(SS, Unexpanded);
- collectUnexpandedParameterPacks(TargetNameInfo, Unexpanded);
- if (!Unexpanded.empty()) {
- DiagnoseUnexpandedParameterPacks(KeywordLoc,
- IsIfExists? UPPC_IfExists
- : UPPC_IfNotExists,
- Unexpanded);
+
+ // Check for an unexpanded parameter pack.
+ auto UPPC = IsIfExists ? UPPC_IfExists : UPPC_IfNotExists;
+ if (DiagnoseUnexpandedParameterPack(SS, UPPC) ||
+ DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC))
return IER_Error;
- }
-
+
return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
}
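The rewritten check rejects an unexpanded pack in either the scope specifier or the name; a minimal sketch (editorial, not part of the patch):

template <typename... Ts> void f() {
  __if_exists(Ts::member) {  // diagnosed: 'Ts' is an unexpanded parameter pack
  }
}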
diff --git a/lib/Sema/SemaExprMember.cpp b/lib/Sema/SemaExprMember.cpp
index 283621889f80..806a3d813ee8 100644
--- a/lib/Sema/SemaExprMember.cpp
+++ b/lib/Sema/SemaExprMember.cpp
@@ -269,6 +269,20 @@ Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
llvm_unreachable("unexpected instance member access kind");
}
+/// Determine whether the input character is from the rgba component set.
+static bool
+IsRGBA(char c) {
+ switch (c) {
+ case 'r':
+ case 'g':
+ case 'b':
+ case 'a':
+ return true;
+ default:
+ return false;
+ }
+}
+
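How the two accessor sets behave under this change (an editorial sketch, not part of the patch):

typedef float float4 __attribute__((ext_vector_type(4)));

void f(float4 v) {
  (void)v.wzyx;  // OK: all accessors from the xyzw set
  (void)v.abgr;  // OK: all accessors from the rgba set
                 //     (warns in OpenCL before version 2.2)
  (void)v.xgba;  // rejected: mixes the xyzw and rgba component sets
}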
/// Check an ext-vector component access expression.
///
/// VK should be set in advance to the value kind of the base
@@ -308,11 +322,25 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
HalvingSwizzle = true;
} else if (!HexSwizzle &&
(Idx = vecType->getPointAccessorIdx(*compStr)) != -1) {
+ bool HasRGBA = IsRGBA(*compStr);
do {
+ // Ensure that xyzw and rgba components don't intermingle.
+ if (HasRGBA != IsRGBA(*compStr))
+ break;
if (HasIndex[Idx]) HasRepeated = true;
HasIndex[Idx] = true;
compStr++;
} while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1);
+
+      // Emit a warning if an rgba selector is used before OpenCL version 2.2.
+ if (HasRGBA || (*compStr && IsRGBA(*compStr))) {
+ if (S.getLangOpts().OpenCL && S.getLangOpts().OpenCLVersion < 220) {
+ const char *DiagBegin = HasRGBA ? CompName->getNameStart() : compStr;
+ S.Diag(OpLoc, diag::ext_opencl_ext_vector_type_rgba_selector)
+ << StringRef(DiagBegin, 1)
+ << S.getLangOpts().OpenCLVersion << SourceRange(CompLoc);
+ }
+ }
} else {
if (HexSwizzle) compStr++;
while ((Idx = vecType->getNumericAccessorIdx(*compStr)) != -1) {
@@ -339,7 +367,7 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
compStr++;
while (*compStr) {
- if (!vecType->isAccessorWithinNumElements(*compStr++)) {
+ if (!vecType->isAccessorWithinNumElements(*compStr++, HexSwizzle)) {
S.Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
<< baseType << SourceRange(CompLoc);
return QualType();
@@ -743,12 +771,6 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
false, ExtraArgs);
}
-static ExprResult
-BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
- SourceLocation OpLoc, const CXXScopeSpec &SS,
- FieldDecl *Field, DeclAccessPair FoundDecl,
- const DeclarationNameInfo &MemberNameInfo);
-
ExprResult
Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
SourceLocation loc,
@@ -834,7 +856,7 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
// Make a nameInfo that properly uses the anonymous name.
DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
- result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
+ result = BuildFieldReferenceExpr(result, baseObjectIsPointer,
SourceLocation(), EmptySS, field,
foundDecl, memberNameInfo).get();
if (!result)
@@ -855,9 +877,10 @@ Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
DeclAccessPair::make(field, field->getAccess());
result =
- BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
- SourceLocation(), (FI == FEnd ? SS : EmptySS),
- field, fakeFoundDecl, memberNameInfo).get();
+ BuildFieldReferenceExpr(result, /*isarrow*/ false, SourceLocation(),
+ (FI == FEnd ? SS : EmptySS), field,
+ fakeFoundDecl, memberNameInfo)
+ .get();
}
return result;
@@ -946,6 +969,15 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
BaseType = BaseType->castAs<PointerType>()->getPointeeType();
}
R.setBaseObjectType(BaseType);
+
+ // C++1z [expr.ref]p2:
+ // For the first option (dot) the first expression shall be a glvalue [...]
+ if (!IsArrow && BaseExpr->isRValue()) {
+ ExprResult Converted = TemporaryMaterializationConversion(BaseExpr);
+ if (Converted.isInvalid())
+ return ExprError();
+ BaseExpr = Converted.get();
+ }
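  // Illustrative example (editorial note, not from the patch):
  //   struct A { int n; };
  //   int k = A().n;  // A() is a prvalue; it is materialized into an xvalue
  //                   // temporary so the member access has a glvalue base.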
LambdaScopeInfo *const CurLSI = getCurLambda();
// If this is an implicit member reference and the overloaded
@@ -1125,8 +1157,8 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
return ExprError();
if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
- return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow, OpLoc, SS, FD,
- FoundDecl, MemberNameInfo);
+ return BuildFieldReferenceExpr(BaseExpr, IsArrow, OpLoc, SS, FD, FoundDecl,
+ MemberNameInfo);
if (MSPropertyDecl *PD = dyn_cast<MSPropertyDecl>(MemberDecl))
return BuildMSPropertyRefExpr(*this, BaseExpr, IsArrow, SS, PD,
@@ -1371,10 +1403,17 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
// Figure out the class that declares the ivar.
assert(!ClassDeclared);
+
Decl *D = cast<Decl>(IV->getDeclContext());
- if (ObjCCategoryDecl *CAT = dyn_cast<ObjCCategoryDecl>(D))
- D = CAT->getClassInterface();
- ClassDeclared = cast<ObjCInterfaceDecl>(D);
+ if (auto *Category = dyn_cast<ObjCCategoryDecl>(D))
+ D = Category->getClassInterface();
+
+ if (auto *Implementation = dyn_cast<ObjCImplementationDecl>(D))
+ ClassDeclared = Implementation->getClassInterface();
+ else if (auto *Interface = dyn_cast<ObjCInterfaceDecl>(D))
+ ClassDeclared = Interface;
+
+ assert(ClassDeclared && "cannot query interface");
} else {
if (IsArrow &&
IDecl->FindPropertyDeclaration(
@@ -1426,11 +1465,11 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (IV->getAccessControl() == ObjCIvarDecl::Private) {
if (!declaresSameEntity(ClassDeclared, IDecl) ||
!declaresSameEntity(ClassOfMethodDecl, ClassDeclared))
- S.Diag(MemberLoc, diag::error_private_ivar_access)
+ S.Diag(MemberLoc, diag::err_private_ivar_access)
<< IV->getDeclName();
} else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
// @protected
- S.Diag(MemberLoc, diag::error_protected_ivar_access)
+ S.Diag(MemberLoc, diag::err_protected_ivar_access)
<< IV->getDeclName();
}
}
@@ -1443,7 +1482,7 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(BaseExp))
if (DE->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
- S.Diag(DE->getLocation(), diag::error_arc_weak_ivar_access);
+ S.Diag(DE->getLocation(), diag::err_arc_weak_ivar_access);
warn = false;
}
}
@@ -1729,11 +1768,11 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
NameInfo, TemplateArgs, S, &ExtraArgs);
}
-static ExprResult
-BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
- SourceLocation OpLoc, const CXXScopeSpec &SS,
- FieldDecl *Field, DeclAccessPair FoundDecl,
- const DeclarationNameInfo &MemberNameInfo) {
+ExprResult
+Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
+ SourceLocation OpLoc, const CXXScopeSpec &SS,
+ FieldDecl *Field, DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo) {
// x.a is an l-value if 'a' has a reference type. Otherwise:
// x.a is an l-value/x-value/pr-value if the base is (and note
// that *x is always an l-value), except that if the base isn't
@@ -1767,36 +1806,34 @@ BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
// except that 'mutable' members don't pick up 'const'.
if (Field->isMutable()) BaseQuals.removeConst();
- Qualifiers MemberQuals
- = S.Context.getCanonicalType(MemberType).getQualifiers();
+ Qualifiers MemberQuals =
+ Context.getCanonicalType(MemberType).getQualifiers();
assert(!MemberQuals.hasAddressSpace());
-
Qualifiers Combined = BaseQuals + MemberQuals;
if (Combined != MemberQuals)
- MemberType = S.Context.getQualifiedType(MemberType, Combined);
+ MemberType = Context.getQualifiedType(MemberType, Combined);
}
- S.UnusedPrivateFields.remove(Field);
+ UnusedPrivateFields.remove(Field);
- ExprResult Base =
- S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
- FoundDecl, Field);
+ ExprResult Base = PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
+ FoundDecl, Field);
if (Base.isInvalid())
return ExprError();
MemberExpr *ME =
- BuildMemberExpr(S, S.Context, Base.get(), IsArrow, OpLoc, SS,
+ BuildMemberExpr(*this, Context, Base.get(), IsArrow, OpLoc, SS,
/*TemplateKWLoc=*/SourceLocation(), Field, FoundDecl,
MemberNameInfo, MemberType, VK, OK);
// Build a reference to a private copy for non-static data members in
// non-static member functions, privatized by OpenMP constructs.
- if (S.getLangOpts().OpenMP && IsArrow &&
- !S.CurContext->isDependentContext() &&
+ if (getLangOpts().OpenMP && IsArrow &&
+ !CurContext->isDependentContext() &&
isa<CXXThisExpr>(Base.get()->IgnoreParenImpCasts())) {
- if (auto *PrivateCopy = S.IsOpenMPCapturedDecl(Field))
- return S.getOpenMPCapturedExpr(PrivateCopy, VK, OK, OpLoc);
+ if (auto *PrivateCopy = IsOpenMPCapturedDecl(Field))
+ return getOpenMPCapturedExpr(PrivateCopy, VK, OK, OpLoc);
}
return ME;
}
diff --git a/lib/Sema/SemaExprObjC.cpp b/lib/Sema/SemaExprObjC.cpp
index 8f0d4ff69576..7dbd660f53ec 100644
--- a/lib/Sema/SemaExprObjC.cpp
+++ b/lib/Sema/SemaExprObjC.cpp
@@ -1112,7 +1112,7 @@ static bool HelperToDiagnoseMismatchedMethodsInGlobalPool(Sema &S,
MatchingMethodDecl, Sema::MMS_loose)) {
if (!Warned) {
Warned = true;
- S.Diag(AtLoc, diag::warning_multiple_selectors)
+ S.Diag(AtLoc, diag::warn_multiple_selectors)
<< Method->getSelector() << FixItHint::CreateInsertion(LParenLoc, "(")
<< FixItHint::CreateInsertion(RParenLoc, ")");
S.Diag(Method->getLocation(), diag::note_method_declared_at)
@@ -1131,7 +1131,7 @@ static void DiagnoseMismatchedSelectors(Sema &S, SourceLocation AtLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors) {
if (!WarnMultipleSelectors ||
- S.Diags.isIgnored(diag::warning_multiple_selectors, SourceLocation()))
+ S.Diags.isIgnored(diag::warn_multiple_selectors, SourceLocation()))
return;
bool Warned = false;
for (Sema::GlobalMethodPool::iterator b = S.MethodPool.begin(),
@@ -1534,7 +1534,7 @@ bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
const ObjCMethodDecl *OMD = SelectorsForTypoCorrection(Sel, ReceiverType);
if (OMD && !OMD->isInvalidDecl()) {
if (getLangOpts().ObjCAutoRefCount)
- DiagID = diag::error_method_not_found_with_typo;
+ DiagID = diag::err_method_not_found_with_typo;
else
DiagID = isClassMessage ? diag::warn_class_method_not_found_with_typo
: diag::warn_instance_method_not_found_with_typo;
@@ -1956,7 +1956,7 @@ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
if (CurMethod->isInstanceMethod()) {
if (SuperType.isNull()) {
// The current class does not have a superclass.
- Diag(receiverNameLoc, diag::error_root_class_cannot_use_super)
+ Diag(receiverNameLoc, diag::err_root_class_cannot_use_super)
<< CurMethod->getClassInterface()->getIdentifier();
return ExprError();
}
@@ -2165,7 +2165,7 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
ObjCInterfaceDecl *Class = Method->getClassInterface();
if (!Class) {
- Diag(SuperLoc, diag::error_no_super_class_message)
+ Diag(SuperLoc, diag::err_no_super_class_message)
<< Method->getDeclName();
return ExprError();
}
@@ -2173,7 +2173,7 @@ ExprResult Sema::ActOnSuperMessage(Scope *S,
QualType SuperTy(Class->getSuperClassType(), 0);
if (SuperTy.isNull()) {
// The current class does not have a superclass.
- Diag(SuperLoc, diag::error_root_class_cannot_use_super)
+ Diag(SuperLoc, diag::err_root_class_cannot_use_super)
<< Class->getIdentifier();
return ExprError();
}
@@ -2539,6 +2539,10 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
SourceLocation RBracLoc,
MultiExprArg ArgsIn,
bool isImplicit) {
+ assert((Receiver || SuperLoc.isValid()) && "If the Receiver is null, the "
+ "SuperLoc must be valid so we can "
+ "use it instead.");
+
// The location of the receiver.
SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
SourceRange RecRange =
@@ -2645,7 +2649,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
CollectMultipleMethodsInGlobalPool(Sel, Methods, true/*InstanceFirst*/,
true/*CheckTheOther*/, typeBound);
if (!Methods.empty()) {
- // We chose the first method as the initial condidate, then try to
+ // We choose the first method as the initial candidate, then try to
// select a better one.
Method = Methods[0];
@@ -2701,7 +2705,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
false/*InstanceFirst*/,
true/*CheckTheOther*/);
if (!Methods.empty()) {
- // We chose the first method as the initial condidate, then try
+ // We choose the first method as the initial candidate, then try
// to select a better one.
Method = Methods[0];
@@ -2789,7 +2793,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
true/*InstanceFirst*/,
false/*CheckTheOther*/);
if (!Methods.empty()) {
- // We chose the first method as the initial condidate, then try
+ // We choose the first method as the initial candidate, then try
// to select a better one.
Method = Methods[0];
diff --git a/lib/Sema/SemaInit.cpp b/lib/Sema/SemaInit.cpp
index 060ee3eef212..befee05713e0 100644
--- a/lib/Sema/SemaInit.cpp
+++ b/lib/Sema/SemaInit.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Initialization.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/ExprCXX.h"
@@ -19,13 +18,13 @@
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Designator.h"
+#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
-#include <map>
using namespace clang;
@@ -470,9 +469,14 @@ ExprResult InitListChecker::PerformEmptyInit(Sema &SemaRef,
SemaRef.Diag(Entity.getDecl()->getLocation(),
diag::note_in_omitted_aggregate_initializer)
<< /*field*/1 << Entity.getDecl();
- else if (Entity.getKind() == InitializedEntity::EK_ArrayElement)
+ else if (Entity.getKind() == InitializedEntity::EK_ArrayElement) {
+ bool IsTrailingArrayNewMember =
+ Entity.getParent() &&
+ Entity.getParent()->isVariableLengthArrayNew();
SemaRef.Diag(Loc, diag::note_in_omitted_aggregate_initializer)
- << /*array element*/0 << Entity.getElementIndex();
+ << (IsTrailingArrayNewMember ? 2 : /*array element*/0)
+ << Entity.getElementIndex();
+ }
}
return ExprError();
}
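A sketch of the case the new note index covers (an editorial illustration, not part of the patch):

struct X { X(int); };  // no default constructor

void f(int n) {
  new X[n]{X(1)};  // error: the trailing elements of the array new would be
                   // value-initialized, which X does not support; the note
                   // emitted here now uses dedicated wording for trailing
                   // array-new elements
}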
@@ -686,8 +690,12 @@ InitListChecker::FillInEmptyInitializations(const InitializedEntity &Entity,
unsigned NumElements = NumInits;
if (const ArrayType *AType = SemaRef.Context.getAsArrayType(ILE->getType())) {
ElementType = AType->getElementType();
- if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType))
+ if (const auto *CAType = dyn_cast<ConstantArrayType>(AType))
NumElements = CAType->getSize().getZExtValue();
+ // For an array new with an unknown bound, ask for one additional element
+ // in order to populate the array filler.
+ if (Entity.isVariableLengthArrayNew())
+ ++NumElements;
ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
0, Entity);
} else if (const VectorType *VType = ILE->getType()->getAs<VectorType>()) {
@@ -937,6 +945,7 @@ static void warnBracedScalarInit(Sema &S, const InitializedEntity &Entity,
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_Binding:
llvm_unreachable("unexpected braced scalar init");
}
@@ -1229,8 +1238,9 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
// subaggregate, brace elision is assumed and the initializer is
// considered for the initialization of the first member of
// the subaggregate.
- if (!SemaRef.getLangOpts().OpenCL &&
- (ElemType->isAggregateType() || ElemType->isVectorType())) {
+ // OpenCL vector initializer is handled elsewhere.
+ if ((!SemaRef.getLangOpts().OpenCL && ElemType->isVectorType()) ||
+ ElemType->isAggregateType()) {
CheckImplicitInitList(Entity, IList, ElemType, Index, StructuredList,
StructuredIndex);
++StructuredIndex;
@@ -1685,10 +1695,13 @@ void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
ArrayType::Normal, 0);
}
if (!hadError && VerifyOnly) {
- // Check if there are any members of the array that get value-initialized.
- // If so, check if doing that is possible.
+ // If there are any members of the array that get value-initialized, check
+    // whether that is possible. That happens if we know the bound and don't have
+ // enough elements, or if we're performing an array new with an unknown
+ // bound.
// FIXME: This needs to detect holes left by designated initializers too.
- if (maxElementsKnown && elementIndex < maxElements)
+ if ((maxElementsKnown && elementIndex < maxElements) ||
+ Entity.isVariableLengthArrayNew())
CheckEmptyInitializable(InitializedEntity::InitializeElement(
SemaRef.Context, 0, Entity),
IList->getLocEnd());
@@ -2896,7 +2909,8 @@ DeclarationName InitializedEntity::getName() const {
case EK_Variable:
case EK_Member:
- return VariableOrMember->getDeclName();
+ case EK_Binding:
+ return Variable.VariableOrMember->getDeclName();
case EK_LambdaCapture:
return DeclarationName(Capture.VarID);
@@ -2919,11 +2933,12 @@ DeclarationName InitializedEntity::getName() const {
llvm_unreachable("Invalid EntityKind!");
}
-DeclaratorDecl *InitializedEntity::getDecl() const {
+ValueDecl *InitializedEntity::getDecl() const {
switch (getKind()) {
case EK_Variable:
case EK_Member:
- return VariableOrMember;
+ case EK_Binding:
+ return Variable.VariableOrMember;
case EK_Parameter:
case EK_Parameter_CF_Audited:
@@ -2958,6 +2973,7 @@ bool InitializedEntity::allowsNRVO() const {
case EK_Parameter:
case EK_Parameter_CF_Audited:
case EK_Member:
+ case EK_Binding:
case EK_New:
case EK_Temporary:
case EK_CompoundLiteralInit:
@@ -2989,6 +3005,7 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
case EK_Result: OS << "Result"; break;
case EK_Exception: OS << "Exception"; break;
case EK_Member: OS << "Member"; break;
+ case EK_Binding: OS << "Binding"; break;
case EK_New: OS << "New"; break;
case EK_Temporary: OS << "Temporary"; break;
case EK_CompoundLiteralInit: OS << "CompoundLiteral";break;
@@ -3005,9 +3022,9 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
break;
}
- if (Decl *D = getDecl()) {
+ if (auto *D = getDecl()) {
OS << " ";
- cast<NamedDecl>(D)->printQualifiedName(OS);
+ D->printQualifiedName(OS);
}
OS << " '" << getType().getAsString() << "'\n";
@@ -3031,6 +3048,7 @@ void InitializationSequence::Step::Destroy() {
case SK_CastDerivedToBaseLValue:
case SK_BindReference:
case SK_BindReferenceToTemporary:
+ case SK_FinalCopy:
case SK_ExtraneousCopyToTemporary:
case SK_UserConversion:
case SK_QualificationConversionRValue:
@@ -3047,7 +3065,10 @@ void InitializationSequence::Step::Destroy() {
case SK_CAssignment:
case SK_StringInit:
case SK_ObjCObjectConversion:
+ case SK_ArrayLoopIndex:
+ case SK_ArrayLoopInit:
case SK_ArrayInit:
+ case SK_GNUArrayInit:
case SK_ParenthesizedArrayInit:
case SK_PassByIndirectCopyRestore:
case SK_PassByIndirectRestore:
@@ -3056,6 +3077,7 @@ void InitializationSequence::Step::Destroy() {
case SK_StdInitializerListConstructorCall:
case SK_OCLSamplerInit:
case SK_OCLZeroEvent:
+ case SK_OCLZeroQueue:
break;
case SK_ConversionSequence:
@@ -3065,7 +3087,14 @@ void InitializationSequence::Step::Destroy() {
}
bool InitializationSequence::isDirectReferenceBinding() const {
- return !Steps.empty() && Steps.back().Kind == SK_BindReference;
+ // There can be some lvalue adjustments after the SK_BindReference step.
+ for (auto I = Steps.rbegin(); I != Steps.rend(); ++I) {
+ if (I->Kind == SK_BindReference)
+ return true;
+ if (I->Kind == SK_BindReferenceToTemporary)
+ return false;
+ }
+ return false;
}
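A sketch of why the reverse scan is needed (editorial, not part of the patch):

int n = 0;
int &r1 = n;        // last step is SK_BindReference: direct binding
const int &r2 = n;  // qualification-adjustment steps may follow the
                    // SK_BindReference step; still a direct binding
const int &r3 = 1;  // SK_BindReferenceToTemporary: not a direct binding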
bool InitializationSequence::isAmbiguous() const {
@@ -3082,6 +3111,8 @@ bool InitializationSequence::isAmbiguous() const {
case FK_IncompatWideStringIntoWideChar:
case FK_AddressOfOverloadFailed: // FIXME: Could do better
case FK_NonConstLValueReferenceBindingToTemporary:
+ case FK_NonConstLValueReferenceBindingToBitfield:
+ case FK_NonConstLValueReferenceBindingToVectorElement:
case FK_NonConstLValueReferenceBindingToUnrelated:
case FK_RValueReferenceBindingToLValue:
case FK_ReferenceInitDropsQualifiers:
@@ -3150,6 +3181,13 @@ void InitializationSequence::AddReferenceBindingStep(QualType T,
Steps.push_back(S);
}
+void InitializationSequence::AddFinalCopy(QualType T) {
+ Step S;
+ S.Kind = SK_FinalCopy;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
void InitializationSequence::AddExtraneousCopyToTemporary(QualType T) {
Step S;
S.Kind = SK_ExtraneousCopyToTemporary;
@@ -3266,9 +3304,20 @@ void InitializationSequence::AddObjCObjectConversionStep(QualType T) {
Steps.push_back(S);
}
-void InitializationSequence::AddArrayInitStep(QualType T) {
+void InitializationSequence::AddArrayInitStep(QualType T, bool IsGNUExtension) {
Step S;
- S.Kind = SK_ArrayInit;
+ S.Kind = IsGNUExtension ? SK_GNUArrayInit : SK_ArrayInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddArrayInitLoopStep(QualType T, QualType EltT) {
+ Step S;
+ S.Kind = SK_ArrayLoopIndex;
+ S.Type = EltT;
+ Steps.insert(Steps.begin(), S);
+
+ S.Kind = SK_ArrayLoopInit;
S.Type = T;
Steps.push_back(S);
}
@@ -3317,6 +3366,13 @@ void InitializationSequence::AddOCLZeroEventStep(QualType T) {
Steps.push_back(S);
}
+void InitializationSequence::AddOCLZeroQueueStep(QualType T) {
+ Step S;
+ S.Kind = SK_OCLZeroQueue;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
void InitializationSequence::RewrapReferenceInitList(QualType T,
InitListExpr *Syntactic) {
assert(Syntactic->getNumInits() == 1 &&
@@ -3434,6 +3490,23 @@ static bool TryInitializerListConstruction(Sema &S,
return true;
}
+/// Determine if the constructor has the signature of a copy or move
+/// constructor for the type T of the class in which it was found. That is,
+/// determine if its first parameter is of type T or reference to (possibly
+/// cv-qualified) T.
+static bool hasCopyOrMoveCtorParam(ASTContext &Ctx,
+ const ConstructorInfo &Info) {
+ if (Info.Constructor->getNumParams() == 0)
+ return false;
+
+ QualType ParmT =
+ Info.Constructor->getParamDecl(0)->getType().getNonReferenceType();
+ QualType ClassT =
+ Ctx.getRecordType(cast<CXXRecordDecl>(Info.FoundDecl->getDeclContext()));
+
+ return Ctx.hasSameUnqualifiedType(ParmT, ClassT);
+}
+
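Which signatures the helper treats as copy/move-like (an editorial sketch, not part of the patch):

struct X {
  X(const X &);  // yes: first parameter is reference to (cv) X
  X(X &&);       // yes
  X(X, int);     // yes: first parameter is X itself
  X(int);        // no
};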
static OverloadingResult
ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
MultiExprArg Args,
@@ -3441,59 +3514,56 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
DeclContext::lookup_result Ctors,
OverloadCandidateSet::iterator &Best,
bool CopyInitializing, bool AllowExplicit,
- bool OnlyListConstructors, bool IsListInit) {
+ bool OnlyListConstructors, bool IsListInit,
+ bool SecondStepOfCopyInit = false) {
CandidateSet.clear();
for (NamedDecl *D : Ctors) {
auto Info = getConstructorInfo(D);
- if (!Info.Constructor)
+ if (!Info.Constructor || Info.Constructor->isInvalidDecl())
continue;
- bool SuppressUserConversions = false;
-
- if (!Info.ConstructorTmpl) {
- // C++11 [over.best.ics]p4:
- // ... and the constructor or user-defined conversion function is a
- // candidate by
- // - 13.3.1.3, when the argument is the temporary in the second step
- // of a class copy-initialization, or
- // - 13.3.1.4, 13.3.1.5, or 13.3.1.6 (in all cases),
- // user-defined conversion sequences are not considered.
- // FIXME: This breaks backward compatibility, e.g. PR12117. As a
- // temporary fix, let's re-instate the third bullet above until
- // there is a resolution in the standard, i.e.,
- // - 13.3.1.7 when the initializer list has exactly one element that is
- // itself an initializer list and a conversion to some class X or
- // reference to (possibly cv-qualified) X is considered for the first
- // parameter of a constructor of X.
- if ((CopyInitializing ||
- (IsListInit && Args.size() == 1 && isa<InitListExpr>(Args[0]))) &&
- Info.Constructor->isCopyOrMoveConstructor())
- SuppressUserConversions = true;
- }
-
- if (!Info.Constructor->isInvalidDecl() &&
- (AllowExplicit || !Info.Constructor->isExplicit()) &&
- (!OnlyListConstructors || S.isInitListConstructor(Info.Constructor))) {
- if (Info.ConstructorTmpl)
- S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
- /*ExplicitArgs*/ nullptr, Args,
- CandidateSet, SuppressUserConversions);
- else {
- // C++ [over.match.copy]p1:
- // - When initializing a temporary to be bound to the first parameter
- // of a constructor that takes a reference to possibly cv-qualified
- // T as its first argument, called with a single argument in the
- // context of direct-initialization, explicit conversion functions
- // are also considered.
- bool AllowExplicitConv = AllowExplicit && !CopyInitializing &&
- Args.size() == 1 &&
- Info.Constructor->isCopyOrMoveConstructor();
- S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl, Args,
- CandidateSet, SuppressUserConversions,
- /*PartialOverloading=*/false,
- /*AllowExplicit=*/AllowExplicitConv);
- }
+ if (!AllowExplicit && Info.Constructor->isExplicit())
+ continue;
+
+ if (OnlyListConstructors && !S.isInitListConstructor(Info.Constructor))
+ continue;
+
+ // C++11 [over.best.ics]p4:
+ // ... and the constructor or user-defined conversion function is a
+ // candidate by
+ // - 13.3.1.3, when the argument is the temporary in the second step
+ // of a class copy-initialization, or
+ // - 13.3.1.4, 13.3.1.5, or 13.3.1.6 (in all cases), [not handled here]
+ // - the second phase of 13.3.1.7 when the initializer list has exactly
+ // one element that is itself an initializer list, and the target is
+ // the first parameter of a constructor of class X, and the conversion
+ // is to X or reference to (possibly cv-qualified) X,
+ // user-defined conversion sequences are not considered.
+ bool SuppressUserConversions =
+ SecondStepOfCopyInit ||
+ (IsListInit && Args.size() == 1 && isa<InitListExpr>(Args[0]) &&
+ hasCopyOrMoveCtorParam(S.Context, Info));
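// A minimal sketch of the suppression above (hypothetical class X):
//
//   struct X { X(int); X(const X &); };
//   X x{{42}};   // {42} may initialize X(int)'s parameter, but it is not
//                // permitted to form a user-defined conversion to X for
//                // the copy constructor's parameter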
+
+ if (Info.ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
+ /*ExplicitArgs*/ nullptr, Args,
+ CandidateSet, SuppressUserConversions);
+ else {
+ // C++ [over.match.copy]p1:
+ // - When initializing a temporary to be bound to the first parameter
+ // of a constructor [for type T] that takes a reference to possibly
+ // cv-qualified T as its first argument, called with a single
+ // argument in the context of direct-initialization, explicit
+ // conversion functions are also considered.
+ // FIXME: What if a constructor template instantiates to such a signature?
+ bool AllowExplicitConv = AllowExplicit && !CopyInitializing &&
+ Args.size() == 1 &&
+ hasCopyOrMoveCtorParam(S.Context, Info);
+ S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl, Args,
+ CandidateSet, SuppressUserConversions,
+ /*PartialOverloading=*/false,
+ /*AllowExplicit=*/AllowExplicitConv);
}
}
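// A minimal sketch of the rule above (hypothetical classes A and B):
//
//   struct B;
//   struct A { explicit operator B(); };
//   struct B { B(); B(const B &); };
//   B b(A{});   // direct-initialization of B's copy-ctor parameter with a
//               // single argument: the explicit conversion function
//               // A::operator B() is considered as well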
@@ -3504,6 +3574,9 @@ ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
/// enumerates the constructors of the initialized entity and performs overload
/// resolution to select the best.
+/// \param DestType The destination class type.
+/// \param DestArrayType The destination type, which is either DestType or
+/// a (possibly multidimensional) array of DestType.
/// \param IsListInit Is this list-initialization?
/// \param IsInitListCopy Is this non-list-initialization resulting from a
/// list-initialization from {x} where x is the same
@@ -3512,11 +3585,18 @@ static void TryConstructorInitialization(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
MultiExprArg Args, QualType DestType,
+ QualType DestArrayType,
InitializationSequence &Sequence,
bool IsListInit = false,
bool IsInitListCopy = false) {
- assert((!IsListInit || (Args.size() == 1 && isa<InitListExpr>(Args[0]))) &&
- "IsListInit must come with a single initializer list argument.");
+ assert(((!IsListInit && !IsInitListCopy) ||
+ (Args.size() == 1 && isa<InitListExpr>(Args[0]))) &&
+ "IsListInit/IsInitListCopy must come with a single initializer list "
+ "argument.");
+ InitListExpr *ILE =
+ (IsListInit || IsInitListCopy) ? cast<InitListExpr>(Args[0]) : nullptr;
+ MultiExprArg UnwrappedArgs =
+ ILE ? MultiExprArg(ILE->getInits(), ILE->getNumInits()) : Args;
// The type we're constructing needs to be complete.
if (!S.isCompleteType(Kind.getLocation(), DestType)) {
@@ -3524,6 +3604,25 @@ static void TryConstructorInitialization(Sema &S,
return;
}
+ // C++1z [dcl.init]p17:
+ // - If the initializer expression is a prvalue and the cv-unqualified
+ // version of the source type is the same class as the class of the
+ // destination, the initializer expression is used to initialize the
+ // destination object.
+ // Per DR (no number yet), this does not apply when initializing a base
+ // class or delegating to another constructor from a mem-initializer.
+ if (S.getLangOpts().CPlusPlus1z &&
+ Entity.getKind() != InitializedEntity::EK_Base &&
+ Entity.getKind() != InitializedEntity::EK_Delegating &&
+ UnwrappedArgs.size() == 1 && UnwrappedArgs[0]->isRValue() &&
+ S.Context.hasSameUnqualifiedType(UnwrappedArgs[0]->getType(), DestType)) {
+ // Convert qualifications if necessary.
+ Sequence.AddQualificationConversionStep(DestType, VK_RValue);
+ if (ILE)
+ Sequence.RewrapReferenceInitList(DestType, ILE);
+ return;
+ }
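// A minimal sketch of the C++1z behavior above (hypothetical class S):
//
//   struct S { S(); S(const S &) = delete; };
//   S make() { return S(); }
//   S s = make();   // OK in C++1z: the prvalue initializes s directly,
//                   // so no copy or move constructor is required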
+
const RecordType *DestRecordType = DestType->getAs<RecordType>();
assert(DestRecordType && "Constructor initialization requires record type");
CXXRecordDecl *DestRecordDecl
@@ -3557,20 +3656,16 @@ static void TryConstructorInitialization(Sema &S,
// constructors of the class T and the argument list consists of the
// initializer list as a single argument.
if (IsListInit) {
- InitListExpr *ILE = cast<InitListExpr>(Args[0]);
AsInitializerList = true;
// If the initializer list has no elements and T has a default constructor,
// the first phase is omitted.
- if (ILE->getNumInits() != 0 || !DestRecordDecl->hasDefaultConstructor())
+ if (!(UnwrappedArgs.empty() && DestRecordDecl->hasDefaultConstructor()))
Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
CandidateSet, Ctors, Best,
CopyInitialization, AllowExplicit,
/*OnlyListConstructors=*/true,
IsListInit);
-
- // Time to unwrap the init list.
- Args = MultiExprArg(ILE->getInits(), ILE->getNumInits());
}
// C++11 [over.match.list]p1:
@@ -3580,7 +3675,7 @@ static void TryConstructorInitialization(Sema &S,
// elements of the initializer list.
if (Result == OR_No_Viable_Function) {
AsInitializerList = false;
- Result = ResolveConstructorOverload(S, Kind.getLocation(), Args,
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), UnwrappedArgs,
CandidateSet, Ctors, Best,
CopyInitialization, AllowExplicit,
/*OnlyListConstructors=*/false,
@@ -3624,7 +3719,7 @@ static void TryConstructorInitialization(Sema &S,
// subsumed by the initialization.
bool HadMultipleCandidates = (CandidateSet.size() > 1);
Sequence.AddConstructorInitializationStep(
- Best->FoundDecl, CtorDecl, DestType, HadMultipleCandidates,
+ Best->FoundDecl, CtorDecl, DestArrayType, HadMultipleCandidates,
IsListInit | IsInitListCopy, AsInitializerList);
}
@@ -3790,10 +3885,11 @@ static void TryListInitialization(Sema &S,
QualType InitType = InitList->getInit(0)->getType();
if (S.Context.hasSameUnqualifiedType(InitType, DestType) ||
S.IsDerivedFrom(InitList->getLocStart(), InitType, DestType)) {
- Expr *InitAsExpr = InitList->getInit(0);
- TryConstructorInitialization(S, Entity, Kind, InitAsExpr, DestType,
- Sequence, /*InitListSyntax*/ false,
- /*IsInitListCopy*/ true);
+ Expr *InitListAsExpr = InitList;
+ TryConstructorInitialization(S, Entity, Kind, InitListAsExpr, DestType,
+ DestType, Sequence,
+ /*InitListSyntax*/false,
+ /*IsInitListCopy*/true);
return;
}
}
@@ -3848,7 +3944,7 @@ static void TryListInitialization(Sema &S,
// - Otherwise, if T is a class type, constructors are considered.
Expr *InitListAsExpr = InitList;
TryConstructorInitialization(S, Entity, Kind, InitListAsExpr, DestType,
- Sequence, /*InitListSyntax*/ true);
+ DestType, Sequence, /*InitListSyntax*/true);
} else
Sequence.SetFailed(InitializationSequence::FK_InitListBadDestinationType);
return;
@@ -3940,12 +4036,10 @@ static void TryListInitialization(Sema &S,
/// \brief Try a reference initialization that involves calling a conversion
/// function.
-static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
- const InitializedEntity &Entity,
- const InitializationKind &Kind,
- Expr *Initializer,
- bool AllowRValues,
- InitializationSequence &Sequence) {
+static OverloadingResult TryRefInitWithConversionFunction(
+ Sema &S, const InitializedEntity &Entity, const InitializationKind &Kind,
+ Expr *Initializer, bool AllowRValues, bool IsLValueRef,
+ InitializationSequence &Sequence) {
QualType DestType = Entity.getType();
QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
QualType T1 = cv1T1.getUnqualifiedType();
@@ -4061,58 +4155,68 @@ static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
// use this initialization. Mark it as referenced.
Function->setReferenced();
- // Compute the returned type of the conversion.
+ // Compute the returned type and value kind of the conversion.
+ QualType cv3T3;
if (isa<CXXConversionDecl>(Function))
- T2 = Function->getReturnType();
+ cv3T3 = Function->getReturnType();
else
- T2 = cv1T1;
-
- // Add the user-defined conversion step.
- bool HadMultipleCandidates = (CandidateSet.size() > 1);
- Sequence.AddUserConversionStep(Function, Best->FoundDecl,
- T2.getNonLValueExprType(S.Context),
- HadMultipleCandidates);
+ cv3T3 = T1;
- // Determine whether we need to perform derived-to-base or
- // cv-qualification adjustments.
ExprValueKind VK = VK_RValue;
- if (T2->isLValueReferenceType())
+ if (cv3T3->isLValueReferenceType())
VK = VK_LValue;
- else if (const RValueReferenceType *RRef = T2->getAs<RValueReferenceType>())
+ else if (const auto *RRef = cv3T3->getAs<RValueReferenceType>())
VK = RRef->getPointeeType()->isFunctionType() ? VK_LValue : VK_XValue;
+ cv3T3 = cv3T3.getNonLValueExprType(S.Context);
+
+ // Add the user-defined conversion step.
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, cv3T3,
+ HadMultipleCandidates);
+ // Determine whether we'll need to perform derived-to-base adjustments or
+ // other conversions.
bool NewDerivedToBase = false;
bool NewObjCConversion = false;
bool NewObjCLifetimeConversion = false;
Sema::ReferenceCompareResult NewRefRelationship
- = S.CompareReferenceRelationship(DeclLoc, T1,
- T2.getNonLValueExprType(S.Context),
+ = S.CompareReferenceRelationship(DeclLoc, T1, cv3T3,
NewDerivedToBase, NewObjCConversion,
NewObjCLifetimeConversion);
+
+ // Add the final conversion sequence, if necessary.
if (NewRefRelationship == Sema::Ref_Incompatible) {
- // If the type we've converted to is not reference-related to the
- // type we're looking for, then there is another conversion step
- // we need to perform to produce a temporary of the right type
- // that we'll be binding to.
+ assert(!isa<CXXConstructorDecl>(Function) &&
+ "should not have conversion after constructor");
+
ImplicitConversionSequence ICS;
ICS.setStandard();
ICS.Standard = Best->FinalConversion;
- T2 = ICS.Standard.getToType(2);
- Sequence.AddConversionSequenceStep(ICS, T2);
- } else if (NewDerivedToBase)
- Sequence.AddDerivedToBaseCastStep(
- S.Context.getQualifiedType(T1,
- T2.getNonReferenceType().getQualifiers()),
- VK);
- else if (NewObjCConversion)
- Sequence.AddObjCObjectConversionStep(
- S.Context.getQualifiedType(T1,
- T2.getNonReferenceType().getQualifiers()));
+ Sequence.AddConversionSequenceStep(ICS, ICS.Standard.getToType(2));
+
+ // Every implicit conversion results in a prvalue, except for a glvalue
+ // derived-to-base conversion, which we handle below.
+ cv3T3 = ICS.Standard.getToType(2);
+ VK = VK_RValue;
+ }
- if (cv1T1.getQualifiers() != T2.getNonReferenceType().getQualifiers())
- Sequence.AddQualificationConversionStep(cv1T1, VK);
+ // If the converted initializer is a prvalue, its type T4 is adjusted to
+ // type "cv1 T4" and the temporary materialization conversion is applied.
+ //
+ // We adjust the cv-qualifications to match the reference regardless of
+ // whether we have a prvalue so that the AST records the change. In this
+ // case, T4 is "cv3 T3".
+ QualType cv1T4 = S.Context.getQualifiedType(cv3T3, cv1T1.getQualifiers());
+ if (cv1T4.getQualifiers() != cv3T3.getQualifiers())
+ Sequence.AddQualificationConversionStep(cv1T4, VK);
+ Sequence.AddReferenceBindingStep(cv1T4, VK == VK_RValue);
+ VK = IsLValueRef ? VK_LValue : VK_XValue;
+
+ if (NewDerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(cv1T1, VK);
+ else if (NewObjCConversion)
+ Sequence.AddObjCObjectConversionStep(cv1T1);
- Sequence.AddReferenceBindingStep(cv1T1, !T2->isReferenceType());
return OR_Success;
}
@@ -4146,54 +4250,11 @@ static void TryReferenceInitialization(Sema &S,
T1Quals, cv2T2, T2, T2Quals, Sequence);
}
-/// Converts the target of reference initialization so that it has the
-/// appropriate qualifiers and value kind.
-///
-/// In this case, 'x' is an 'int' lvalue, but it needs to be 'const int'.
-/// \code
-/// int x;
-/// const int &r = x;
-/// \endcode
-///
-/// In this case the reference is binding to a bitfield lvalue, which isn't
-/// valid. Perform a load to create a lifetime-extended temporary instead.
-/// \code
-/// const int &r = someStruct.bitfield;
-/// \endcode
-static ExprValueKind
-convertQualifiersAndValueKindIfNecessary(Sema &S,
- InitializationSequence &Sequence,
- Expr *Initializer,
- QualType cv1T1,
- Qualifiers T1Quals,
- Qualifiers T2Quals,
- bool IsLValueRef) {
- bool IsNonAddressableType = Initializer->refersToBitField() ||
- Initializer->refersToVectorElement();
-
- if (IsNonAddressableType) {
- // C++11 [dcl.init.ref]p5: [...] Otherwise, the reference shall be an
- // lvalue reference to a non-volatile const type, or the reference shall be
- // an rvalue reference.
- //
- // If not, we can't make a temporary and bind to that. Give up and allow the
- // error to be diagnosed later.
- if (IsLValueRef && (!T1Quals.hasConst() || T1Quals.hasVolatile())) {
- assert(Initializer->isGLValue());
- return Initializer->getValueKind();
- }
-
- // Force a load so we can materialize a temporary.
- Sequence.AddLValueToRValueStep(cv1T1.getUnqualifiedType());
- return VK_RValue;
- }
-
- if (T1Quals != T2Quals) {
- Sequence.AddQualificationConversionStep(cv1T1,
- Initializer->getValueKind());
- }
-
- return Initializer->getValueKind();
+/// Determine whether an expression is a non-referenceable glvalue (one to
/// which a reference can never bind). Attempting to bind a reference to
+/// such a glvalue will always create a temporary.
+static bool isNonReferenceableGLValue(Expr *E) {
+ return E->refersToBitField() || E->refersToVectorElement();
}
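// For example (sketch), a reference can never bind to a bit-field lvalue:
//
//   struct S { int bf : 3; };
//   S s;
//   const int &r = s.bf;   // binds to a temporary copy of the bit-field,
//                          // never to the bit-field itself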
/// \brief Reference initialization without resolving overloaded functions.
@@ -4231,31 +4292,28 @@ static void TryReferenceInitializationCore(Sema &S,
OverloadingResult ConvOvlResult = OR_Success;
bool T1Function = T1->isFunctionType();
if (isLValueRef || T1Function) {
- if (InitCategory.isLValue() &&
- (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ if (InitCategory.isLValue() && !isNonReferenceableGLValue(Initializer) &&
+ (RefRelationship == Sema::Ref_Compatible ||
(Kind.isCStyleOrFunctionalCast() &&
RefRelationship == Sema::Ref_Related))) {
// - is an lvalue (but is not a bit-field), and "cv1 T1" is
// reference-compatible with "cv2 T2," or
- //
- // Per C++ [over.best.ics]p2, we don't diagnose whether the lvalue is a
- // bit-field when we're determining whether the reference initialization
- // can occur. However, we do pay attention to whether it is a bit-field
- // to decide whether we're actually binding to a temporary created from
- // the bit-field.
+ if (T1Quals != T2Quals)
+ // Convert to cv1 T2. This should only add qualifiers unless this is a
+ // c-style cast. The removal of qualifiers in that case notionally
+ // happens after the reference binding, but that doesn't matter.
+ Sequence.AddQualificationConversionStep(
+ S.Context.getQualifiedType(T2, T1Quals),
+ Initializer->getValueKind());
if (DerivedToBase)
- Sequence.AddDerivedToBaseCastStep(
- S.Context.getQualifiedType(T1, T2Quals),
- VK_LValue);
+ Sequence.AddDerivedToBaseCastStep(cv1T1, VK_LValue);
else if (ObjCConversion)
- Sequence.AddObjCObjectConversionStep(
- S.Context.getQualifiedType(T1, T2Quals));
-
- ExprValueKind ValueKind =
- convertQualifiersAndValueKindIfNecessary(S, Sequence, Initializer,
- cv1T1, T1Quals, T2Quals,
- isLValueRef);
- Sequence.AddReferenceBindingStep(cv1T1, ValueKind == VK_RValue);
+ Sequence.AddObjCObjectConversionStep(cv1T1);
+
+ // We only create a temporary here when binding a reference to a
+ // bit-field or vector element. Those cases aren't supposed to be
+ // handled by this bullet, but the outcome is the same either way.
+ Sequence.AddReferenceBindingStep(cv1T1, false);
return;
}
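// A minimal sketch of the direct binding above:
//
//   int x = 0;
//   const int &r = x;   // lvalue, reference-compatible: bind directly; the
//                       // qualification step records the int -> const int
//                       // adjustment in the sequence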
@@ -4270,7 +4328,8 @@ static void TryReferenceInitializationCore(Sema &S,
if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
(isLValueRef || InitCategory.isRValue())) {
ConvOvlResult = TryRefInitWithConversionFunction(
- S, Entity, Kind, Initializer, /*AllowRValues*/isRValueRef, Sequence);
+ S, Entity, Kind, Initializer, /*AllowRValues*/ isRValueRef,
+ /*IsLValueRef*/ isLValueRef, Sequence);
if (ConvOvlResult == OR_Success)
return;
if (ConvOvlResult != OR_No_Viable_Function)
@@ -4290,28 +4349,51 @@ static void TryReferenceInitializationCore(Sema &S,
Sequence.SetOverloadFailure(
InitializationSequence::FK_ReferenceInitOverloadFailed,
ConvOvlResult);
- else
- Sequence.SetFailed(InitCategory.isLValue()
- ? (RefRelationship == Sema::Ref_Related
- ? InitializationSequence::FK_ReferenceInitDropsQualifiers
- : InitializationSequence::FK_NonConstLValueReferenceBindingToUnrelated)
- : InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
-
+ else if (!InitCategory.isLValue())
+ Sequence.SetFailed(
+ InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
+ else {
+ InitializationSequence::FailureKind FK;
+ switch (RefRelationship) {
+ case Sema::Ref_Compatible:
+ if (Initializer->refersToBitField())
+ FK = InitializationSequence::
+ FK_NonConstLValueReferenceBindingToBitfield;
+ else if (Initializer->refersToVectorElement())
+ FK = InitializationSequence::
+ FK_NonConstLValueReferenceBindingToVectorElement;
+ else
+ llvm_unreachable("unexpected kind of compatible initializer");
+ break;
+ case Sema::Ref_Related:
+ FK = InitializationSequence::FK_ReferenceInitDropsQualifiers;
+ break;
+ case Sema::Ref_Incompatible:
+ FK = InitializationSequence::
+ FK_NonConstLValueReferenceBindingToUnrelated;
+ break;
+ }
+ Sequence.SetFailed(FK);
+ }
return;
}
// - If the initializer expression
- // - is an xvalue, class prvalue, array prvalue, or function lvalue and
- // "cv1 T1" is reference-compatible with "cv2 T2"
- // Note: functions are handled below.
+ // - is an
+ // [<=14] xvalue (but not a bit-field), class prvalue, array prvalue, or
+ // [1z] rvalue (but not a bit-field) or
+ // function lvalue and "cv1 T1" is reference-compatible with "cv2 T2"
+ //
+ // Note: functions are handled above and below rather than here...
if (!T1Function &&
- (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ (RefRelationship == Sema::Ref_Compatible ||
(Kind.isCStyleOrFunctionalCast() &&
RefRelationship == Sema::Ref_Related)) &&
- (InitCategory.isXValue() ||
- (InitCategory.isPRValue() && T2->isRecordType()) ||
- (InitCategory.isPRValue() && T2->isArrayType()))) {
- ExprValueKind ValueKind = InitCategory.isXValue()? VK_XValue : VK_RValue;
+ ((InitCategory.isXValue() && !isNonReferenceableGLValue(Initializer)) ||
+ (InitCategory.isPRValue() &&
+ (S.getLangOpts().CPlusPlus1z || T2->isRecordType() ||
+ T2->isArrayType())))) {
+ ExprValueKind ValueKind = InitCategory.isXValue() ? VK_XValue : VK_RValue;
if (InitCategory.isPRValue() && T2->isRecordType()) {
// The corresponding bullet in C++03 [dcl.init.ref]p5 gives the
// compiler the freedom to perform a copy here or bind to the
@@ -4328,19 +4410,22 @@ static void TryReferenceInitializationCore(Sema &S,
CheckCXX98CompatAccessibleCopy(S, Entity, Initializer);
}
+ // C++1z [dcl.init.ref]/5.2.1.2:
+ // If the converted initializer is a prvalue, its type T4 is adjusted
+ // to type "cv1 T4" and the temporary materialization conversion is
+ // applied.
+ QualType cv1T4 = S.Context.getQualifiedType(cv2T2, T1Quals);
+ if (T1Quals != T2Quals)
+ Sequence.AddQualificationConversionStep(cv1T4, ValueKind);
+ Sequence.AddReferenceBindingStep(cv1T4, ValueKind == VK_RValue);
+ ValueKind = isLValueRef ? VK_LValue : VK_XValue;
+
+ // In any case, the reference is bound to the resulting glvalue (or to
+ // an appropriate base class subobject).
if (DerivedToBase)
- Sequence.AddDerivedToBaseCastStep(S.Context.getQualifiedType(T1, T2Quals),
- ValueKind);
+ Sequence.AddDerivedToBaseCastStep(cv1T1, ValueKind);
else if (ObjCConversion)
- Sequence.AddObjCObjectConversionStep(
- S.Context.getQualifiedType(T1, T2Quals));
-
- ValueKind = convertQualifiersAndValueKindIfNecessary(S, Sequence,
- Initializer, cv1T1,
- T1Quals, T2Quals,
- isLValueRef);
-
- Sequence.AddReferenceBindingStep(cv1T1, ValueKind == VK_RValue);
+ Sequence.AddObjCObjectConversionStep(cv1T1);
return;
}
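// A minimal sketch of the binding above (hypothetical class T):
//
//   struct T {};
//   const T &r = T();   // prvalue: adjust the type to "const T", materialize
//                       // a temporary, and bind the reference to it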
@@ -4353,7 +4438,8 @@ static void TryReferenceInitializationCore(Sema &S,
if (T2->isRecordType()) {
if (RefRelationship == Sema::Ref_Incompatible) {
ConvOvlResult = TryRefInitWithConversionFunction(
- S, Entity, Kind, Initializer, /*AllowRValues*/true, Sequence);
+ S, Entity, Kind, Initializer, /*AllowRValues*/ true,
+ /*IsLValueRef*/ isLValueRef, Sequence);
if (ConvOvlResult)
Sequence.SetOverloadFailure(
InitializationSequence::FK_ReferenceInitOverloadFailed,
@@ -4362,8 +4448,7 @@ static void TryReferenceInitializationCore(Sema &S,
return;
}
- if ((RefRelationship == Sema::Ref_Compatible ||
- RefRelationship == Sema::Ref_Compatible_With_Added_Qualification) &&
+ if (RefRelationship == Sema::Ref_Compatible &&
isRValueRef && InitCategory.isLValue()) {
Sequence.SetFailed(
InitializationSequence::FK_RValueReferenceBindingToLValue);
@@ -4462,23 +4547,21 @@ static void TryValueInitialization(Sema &S,
if (const RecordType *RT = T->getAs<RecordType>()) {
if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
bool NeedZeroInitialization = true;
- if (!S.getLangOpts().CPlusPlus11) {
- // C++98:
- // -- if T is a class type (clause 9) with a user-declared constructor
- // (12.1), then the default constructor for T is called (and the
- // initialization is ill-formed if T has no accessible default
- // constructor);
- if (ClassDecl->hasUserDeclaredConstructor())
- NeedZeroInitialization = false;
- } else {
- // C++11:
- // -- if T is a class type (clause 9) with either no default constructor
- // (12.1 [class.ctor]) or a default constructor that is user-provided
- // or deleted, then the object is default-initialized;
- CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
- if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
- NeedZeroInitialization = false;
- }
+ // C++98:
+ // -- if T is a class type (clause 9) with a user-declared constructor
+ // (12.1), then the default constructor for T is called (and the
+ // initialization is ill-formed if T has no accessible default
+ // constructor);
+ // C++11:
+ // -- if T is a class type (clause 9) with either no default constructor
+ // (12.1 [class.ctor]) or a default constructor that is user-provided
+ // or deleted, then the object is default-initialized;
+ //
+ // Note that the C++11 rule is the same as the C++98 rule if there are no
+ // defaulted or deleted constructors, so we just use it unconditionally.
+ CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
+ if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
+ NeedZeroInitialization = false;
// -- if T is a (possibly cv-qualified) non-union class type without a
// user-provided or deleted default constructor, then the object is
@@ -4512,8 +4595,10 @@ static void TryValueInitialization(Sema &S,
MultiExprArg Args(&InitListAsExpr, InitList ? 1 : 0);
bool InitListSyntax = InitList;
- return TryConstructorInitialization(S, Entity, Kind, Args, T, Sequence,
- InitListSyntax);
+ // FIXME: Instead of creating a CXXConstructExpr of array type here,
+ // wrap a class-typed CXXConstructExpr in an ArrayInitLoopExpr.
+ return TryConstructorInitialization(
+ S, Entity, Kind, Args, T, Entity.getType(), Sequence, InitListSyntax);
}
}
@@ -4536,7 +4621,8 @@ static void TryDefaultInitialization(Sema &S,
// constructor for T is called (and the initialization is ill-formed if
// T has no accessible default constructor);
if (DestType->isRecordType() && S.getLangOpts().CPlusPlus) {
- TryConstructorInitialization(S, Entity, Kind, None, DestType, Sequence);
+ TryConstructorInitialization(S, Entity, Kind, None, DestType,
+ Entity.getType(), Sequence);
return;
}
@@ -4680,26 +4766,55 @@ static void TryUserDefinedConversion(Sema &S,
Sequence.AddUserConversionStep(Function, Best->FoundDecl,
DestType.getUnqualifiedType(),
HadMultipleCandidates);
+
+ // C++14 and before:
+ // - if the function is a constructor, the call initializes a temporary
+ // of the cv-unqualified version of the destination type. The [...]
+ // temporary [...] is then used to direct-initialize, according to the
+ // rules above, the object that is the destination of the
+ // copy-initialization.
+ // Note that this just performs a simple object copy from the temporary.
+ //
+ // C++1z:
+ // - if the function is a constructor, the call is a prvalue of the
+ // cv-unqualified version of the destination type whose return object
+ // is initialized by the constructor. The call is used to
+ // direct-initialize, according to the rules above, the object that
+ // is the destination of the copy-initialization.
+ // Therefore we need to do nothing further.
+ //
+ // FIXME: Mark this copy as extraneous.
+ if (!S.getLangOpts().CPlusPlus1z)
+ Sequence.AddFinalCopy(DestType);
+ else if (DestType.hasQualifiers())
+ Sequence.AddQualificationConversionStep(DestType, VK_RValue);
return;
}
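// A minimal sketch of the difference (hypothetical class D):
//
//   struct D { D(int); D(const D &); };
//   D d = 0;   // C++14: D(0) creates a temporary that is then copied into d
//              // (the final copy step added above);
//              // C++1z: the constructor call is a prvalue that initializes
//              // d directly, so no further step is added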
// Add the user-defined conversion step that calls the conversion function.
QualType ConvType = Function->getCallResultType();
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType,
+ HadMultipleCandidates);
+
if (ConvType->getAs<RecordType>()) {
- // If we're converting to a class type, there may be an copy of
- // the resulting temporary object (possible to create an object of
- // a base class type). That copy is not a separate conversion, so
- // we just make a note of the actual destination type (possibly a
- // base class of the type returned by the conversion function) and
- // let the user-defined conversion step handle the conversion.
- Sequence.AddUserConversionStep(Function, Best->FoundDecl, DestType,
- HadMultipleCandidates);
+ // The call is used to direct-initialize [...] the object that is the
+ // destination of the copy-initialization.
+ //
+ // In C++1z, this does not call a constructor if we enter [dcl.init]/17.6.1:
+ // - If the initializer expression is a prvalue and the cv-unqualified
+ // version of the source type is the same as the class of the
+ // destination [... do not make an extra copy]
+ //
+ // FIXME: Mark this copy as extraneous.
+ if (!S.getLangOpts().CPlusPlus1z ||
+ Function->getReturnType()->isReferenceType() ||
+ !S.Context.hasSameUnqualifiedType(ConvType, DestType))
+ Sequence.AddFinalCopy(DestType);
+ else if (!S.Context.hasSameType(ConvType, DestType))
+ Sequence.AddQualificationConversionStep(DestType, VK_RValue);
return;
}
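// A minimal sketch of the C++1z special case (hypothetical classes A and B):
//
//   struct B { B(); };
//   struct A { operator B(); };
//   B b = A();   // C++14: convert, then copy-initialize b from the result;
//                // C++1z: the conversion result is already a B prvalue, so
//                // the final copy is omitted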
- Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType,
- HadMultipleCandidates);
-
// If the conversion following the call to the conversion function
// is interesting, add it as a separate step.
if (Best->FinalConversion.First || Best->FinalConversion.Second ||
@@ -4886,7 +5001,8 @@ static bool TryOCLSamplerInitialization(Sema &S,
QualType DestType,
Expr *Initializer) {
if (!S.getLangOpts().OpenCL || !DestType->isSamplerT() ||
- !Initializer->isIntegerConstantExpr(S.getASTContext()))
+ (!Initializer->isIntegerConstantExpr(S.Context) &&
+ !Initializer->getType()->isSamplerT()))
return false;
Sequence.AddOCLSamplerInitStep(DestType);
@@ -4914,6 +5030,20 @@ static bool TryOCLZeroEventInitialization(Sema &S,
return true;
}
+static bool TryOCLZeroQueueInitialization(Sema &S,
+ InitializationSequence &Sequence,
+ QualType DestType,
+ Expr *Initializer) {
+ if (!S.getLangOpts().OpenCL || S.getLangOpts().OpenCLVersion < 200 ||
+ !DestType->isQueueT() ||
+ !Initializer->isIntegerConstantExpr(S.getASTContext()) ||
+ (Initializer->EvaluateKnownConstInt(S.getASTContext()) != 0))
+ return false;
+
+ Sequence.AddOCLZeroQueueStep(DestType);
+ return true;
+}
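// A sketch of the OpenCL 2.0 source form this accepts (hypothetical):
//
//   queue_t q = 0;   // only an integer constant expression equal to 0
//                    // may initialize a queue_t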
+
InitializationSequence::InitializationSequence(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -4936,6 +5066,42 @@ static bool isExprAnUnaddressableFunction(Sema &S, const Expr *E) {
cast<FunctionDecl>(DRE->getDecl()));
}
+/// Determine whether we can perform an elementwise array copy for this kind
+/// of entity.
+static bool canPerformArrayCopy(const InitializedEntity &Entity) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_LambdaCapture:
+ // C++ [expr.prim.lambda]p24:
+ // For array members, the array elements are direct-initialized in
+ // increasing subscript order.
+ return true;
+
+ case InitializedEntity::EK_Variable:
+ // C++ [dcl.decomp]p1:
+ // [...] each element is copy-initialized or direct-initialized from the
+ // corresponding element of the assignment-expression [...]
+ return isa<DecompositionDecl>(Entity.getDecl());
+
+ case InitializedEntity::EK_Member:
+ // C++ [class.copy.ctor]p14:
+ // - if the member is an array, each element is direct-initialized with
+ // the corresponding subobject of x
+ return Entity.isImplicitMemberInitializer();
+
+ case InitializedEntity::EK_ArrayElement:
+ // All the above cases are intended to apply recursively, even though none
+ // of them actually say that.
+ if (auto *E = Entity.getParent())
+ return canPerformArrayCopy(*E);
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
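// A minimal sketch of two of the entities above (C++1z):
//
//   int arr[3] = {1, 2, 3};
//   auto [a, b, c] = arr;               // decomposition: elements are
//                                       // copied one by one from arr
//   auto f = [arr] { return arr[0]; };  // lambda capture: array elements
//                                       // are direct-initialized in
//                                       // increasing subscript order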
+
void InitializationSequence::InitializeFrom(Sema &S,
const InitializedEntity &Entity,
const InitializationKind &Kind,
@@ -5058,6 +5224,34 @@ void InitializationSequence::InitializeFrom(Sema &S,
}
}
+ // Some kinds of initialization permit an array to be initialized from
+ // another array of the same type, and perform elementwise initialization.
+ if (Initializer && isa<ConstantArrayType>(DestAT) &&
+ S.Context.hasSameUnqualifiedType(Initializer->getType(),
+ Entity.getType()) &&
+ canPerformArrayCopy(Entity)) {
+ // If source is a prvalue, use it directly.
+ if (Initializer->getValueKind() == VK_RValue) {
+ AddArrayInitStep(DestType, /*IsGNUExtension*/false);
+ return;
+ }
+
+ // Emit element-at-a-time copy loop.
+ InitializedEntity Element =
+ InitializedEntity::InitializeElement(S.Context, 0, Entity);
+ QualType InitEltT =
+ Context.getAsArrayType(Initializer->getType())->getElementType();
+ OpaqueValueExpr OVE(Initializer->getExprLoc(), InitEltT,
+ Initializer->getValueKind(),
+ Initializer->getObjectKind());
+ Expr *OVEAsExpr = &OVE;
+ InitializeFrom(S, Element, Kind, OVEAsExpr, TopLevelOfInitList,
+ TreatUnavailableAsInvalid);
+ if (!Failed())
+ AddArrayInitLoopStep(Entity.getType(), InitEltT);
+ return;
+ }
+
// Note: as an GNU C extension, we allow initialization of an
// array from a compound literal that creates an array of the same
// type, so long as the initializer has no side effects.
@@ -5071,7 +5265,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
else if (Initializer->HasSideEffects(S.Context))
SetFailed(FK_NonConstantArrayInit);
else {
- AddArrayInitStep(DestType);
+ AddArrayInitStep(DestType, /*IsGNUExtension*/true);
}
}
// Note: as a GNU C++ extension, we allow list-initialization of a
@@ -5112,6 +5306,9 @@ void InitializationSequence::InitializeFrom(Sema &S,
if (TryOCLZeroEventInitialization(S, *this, DestType, Initializer))
return;
+ if (TryOCLZeroQueueInitialization(S, *this, DestType, Initializer))
+ return;
+
// Handle initialization in C
AddCAssignmentStep(DestType);
MaybeProduceObjCObject(S, *this, Entity);
@@ -5131,7 +5328,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
(Context.hasSameUnqualifiedType(SourceType, DestType) ||
S.IsDerivedFrom(Initializer->getLocStart(), SourceType, DestType))))
TryConstructorInitialization(S, Entity, Kind, Args,
- DestType, *this);
+ DestType, DestType, *this);
// - Otherwise (i.e., for the remaining copy-initialization cases),
// user-defined conversion sequences that can convert from the source
// type to the destination type or (when a conversion function is
@@ -5270,6 +5467,7 @@ getAssignmentAction(const InitializedEntity &Entity, bool Diagnose = false) {
return Sema::AA_Casting;
case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Binding:
case InitializedEntity::EK_ArrayElement:
case InitializedEntity::EK_VectorElement:
case InitializedEntity::EK_ComplexElement:
@@ -5305,6 +5503,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
case InitializedEntity::EK_Parameter_CF_Audited:
case InitializedEntity::EK_Temporary:
case InitializedEntity::EK_RelatedResult:
+ case InitializedEntity::EK_Binding:
return true;
}
@@ -5313,7 +5512,7 @@ static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
/// \brief Whether the given entity, when initialized with an object
/// created for that initialization, requires destruction.
-static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
+static bool shouldDestroyEntity(const InitializedEntity &Entity) {
switch (Entity.getKind()) {
case InitializedEntity::EK_Result:
case InitializedEntity::EK_New:
@@ -5326,6 +5525,7 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
return false;
case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Binding:
case InitializedEntity::EK_Variable:
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Parameter_CF_Audited:
@@ -5340,50 +5540,6 @@ static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
llvm_unreachable("missed an InitializedEntity kind?");
}
-/// \brief Look for copy and move constructors and constructor templates, for
-/// copying an object via direct-initialization (per C++11 [dcl.init]p16).
-static void LookupCopyAndMoveConstructors(Sema &S,
- OverloadCandidateSet &CandidateSet,
- CXXRecordDecl *Class,
- Expr *CurInitExpr) {
- DeclContext::lookup_result R = S.LookupConstructors(Class);
- // The container holding the constructors can under certain conditions
- // be changed while iterating (e.g. because of deserialization).
- // To be safe we copy the lookup results to a new container.
- SmallVector<NamedDecl*, 16> Ctors(R.begin(), R.end());
- for (SmallVectorImpl<NamedDecl *>::iterator
- CI = Ctors.begin(), CE = Ctors.end(); CI != CE; ++CI) {
- NamedDecl *D = *CI;
- auto Info = getConstructorInfo(D);
- if (!Info.Constructor)
- continue;
-
- if (!Info.ConstructorTmpl) {
- // Handle copy/move constructors, only.
- if (Info.Constructor->isInvalidDecl() ||
- !Info.Constructor->isCopyOrMoveConstructor() ||
- !Info.Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
- continue;
-
- S.AddOverloadCandidate(Info.Constructor, Info.FoundDecl,
- CurInitExpr, CandidateSet);
- continue;
- }
-
- // Handle constructor templates.
- if (Info.ConstructorTmpl->isInvalidDecl())
- continue;
-
- if (!Info.Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
- continue;
-
- // FIXME: Do we need to limit this to copy-constructor-like
- // candidates?
- S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
- nullptr, CurInitExpr, CandidateSet, true);
- }
-}
-
/// \brief Get the location at which initialization diagnostics should appear.
static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
Expr *Initializer) {
@@ -5395,6 +5551,7 @@ static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
return Entity.getThrowLoc();
case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_Binding:
return Entity.getDecl()->getLocation();
case InitializedEntity::EK_LambdaCapture:
@@ -5453,39 +5610,24 @@ static ExprResult CopyObject(Sema &S,
if (!Class)
return CurInit;
- // C++0x [class.copy]p32:
- // When certain criteria are met, an implementation is allowed to
- // omit the copy/move construction of a class object, even if the
- // copy/move constructor and/or destructor for the object have
- // side effects. [...]
- // - when a temporary class object that has not been bound to a
- // reference (12.2) would be copied/moved to a class object
- // with the same cv-unqualified type, the copy/move operation
- // can be omitted by constructing the temporary object
- // directly into the target of the omitted copy/move
- //
- // Note that the other three bullets are handled elsewhere. Copy
- // elision for return statements and throw expressions are handled as part
- // of constructor initialization, while copy elision for exception handlers
- // is handled by the run-time.
- bool Elidable = CurInitExpr->isTemporaryObject(S.Context, Class);
SourceLocation Loc = getInitializationLoc(Entity, CurInit.get());
// Make sure that the type we are copying is complete.
if (S.RequireCompleteType(Loc, T, diag::err_temp_copy_incomplete))
return CurInit;
- // Perform overload resolution using the class's copy/move constructors.
- // Only consider constructors and constructor templates. Per
- // C++0x [dcl.init]p16, second bullet to class types, this initialization
+ // Perform overload resolution using the class's constructors. Per
+ // C++11 [dcl.init]p16, second bullet for class types, this initialization
// is direct-initialization.
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- LookupCopyAndMoveConstructors(S, CandidateSet, Class, CurInitExpr);
-
- bool HadMultipleCandidates = (CandidateSet.size() > 1);
+ DeclContext::lookup_result Ctors = S.LookupConstructors(Class);
OverloadCandidateSet::iterator Best;
- switch (CandidateSet.BestViableFunction(S, Loc, Best)) {
+ switch (ResolveConstructorOverload(
+ S, Loc, CurInitExpr, CandidateSet, Ctors, Best,
+ /*CopyInitializing=*/false, /*AllowExplicit=*/true,
+ /*OnlyListConstructors=*/false, /*IsListInit=*/false,
+ /*SecondStepOfCopyInit=*/true)) {
case OR_Success:
break;
@@ -5515,6 +5657,8 @@ static ExprResult CopyObject(Sema &S,
return ExprError();
}
+ bool HadMultipleCandidates = CandidateSet.size() > 1;
+
CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
SmallVector<Expr*, 8> ConstructorArgs;
CurInit.get(); // Ownership transferred into MultiExprArg, below.
@@ -5554,6 +5698,31 @@ static ExprResult CopyObject(Sema &S,
if (S.CompleteConstructorCall(Constructor, CurInitExpr, Loc, ConstructorArgs))
return ExprError();
+ // C++0x [class.copy]p32:
+ // When certain criteria are met, an implementation is allowed to
+ // omit the copy/move construction of a class object, even if the
+ // copy/move constructor and/or destructor for the object have
+ // side effects. [...]
+ // - when a temporary class object that has not been bound to a
+ // reference (12.2) would be copied/moved to a class object
+ // with the same cv-unqualified type, the copy/move operation
+ // can be omitted by constructing the temporary object
+ // directly into the target of the omitted copy/move
+ //
+ // Note that the other three bullets are handled elsewhere. Copy
+ // elision for return statements and throw expressions are handled as part
+ // of constructor initialization, while copy elision for exception handlers
+ // is handled by the run-time.
+ //
+ // FIXME: If the function parameter is not the same type as the temporary, we
+ // should still be able to elide the copy, but we don't have a way to
+ // represent in the AST how much should be elided in this case.
+ bool Elidable =
+ CurInitExpr->isTemporaryObject(S.Context, Class) &&
+ S.Context.hasSameUnqualifiedType(
+ Best->Function->getParamDecl(0)->getType().getNonReferenceType(),
+ CurInitExpr->getType());
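// A minimal sketch of an elidable copy (hypothetical class S, C++14 mode):
//
//   struct S { S(); S(const S &); };
//   S s = S();   // the unbound temporary S() is copied into s with the same
//                // cv-unqualified type, so the copy is marked elidable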
+
// Actually perform the constructor call.
CurInit = S.BuildCXXConstructExpr(Loc, T, Best->FoundDecl, Constructor,
Elidable,
@@ -5589,12 +5758,16 @@ static void CheckCXX98CompatAccessibleCopy(Sema &S,
// Find constructors which would have been considered.
OverloadCandidateSet CandidateSet(Loc, OverloadCandidateSet::CSK_Normal);
- LookupCopyAndMoveConstructors(
- S, CandidateSet, cast<CXXRecordDecl>(Record->getDecl()), CurInitExpr);
+ DeclContext::lookup_result Ctors =
+ S.LookupConstructors(cast<CXXRecordDecl>(Record->getDecl()));
// Perform overload resolution.
OverloadCandidateSet::iterator Best;
- OverloadingResult OR = CandidateSet.BestViableFunction(S, Loc, Best);
+ OverloadingResult OR = ResolveConstructorOverload(
+ S, Loc, CurInitExpr, CandidateSet, Ctors, Best,
+ /*CopyInitializing=*/false, /*AllowExplicit=*/true,
+ /*OnlyListConstructors=*/false, /*IsListInit=*/false,
+ /*SecondStepOfCopyInit=*/true);
PartialDiagnostic Diag = S.PDiag(diag::warn_cxx98_compat_temp_copy)
<< OR << (int)Entity.getKind() << CurInitExpr->getType()
@@ -5643,11 +5816,6 @@ void InitializationSequence::PrintInitLocationNote(Sema &S,
<< Entity.getMethodDecl()->getDeclName();
}
-static bool isReferenceBinding(const InitializationSequence::Step &s) {
- return s.Kind == InitializationSequence::SK_BindReference ||
- s.Kind == InitializationSequence::SK_BindReferenceToTemporary;
-}
-
/// Returns true if the parameters describe a constructor initialization of
/// an explicit temporary object, e.g. "Point(x, y)".
static bool isExplicitTemporary(const InitializedEntity &Entity,
@@ -5714,9 +5882,10 @@ PerformConstructorInitialization(Sema &S,
// T as its first argument, called with a single argument in the
// context of direct-initialization, explicit conversion functions
// are also considered.
- bool AllowExplicitConv = Kind.AllowExplicit() && !Kind.isCopyInit() &&
- Args.size() == 1 &&
- Constructor->isCopyOrMoveConstructor();
+ bool AllowExplicitConv =
+ Kind.AllowExplicit() && !Kind.isCopyInit() && Args.size() == 1 &&
+ hasCopyOrMoveCtorParam(S.Context,
+ getConstructorInfo(Step.Function.FoundDecl));
// Determine the arguments required to actually perform the constructor
// call.
@@ -5776,7 +5945,7 @@ PerformConstructorInitialization(Sema &S,
// If the entity allows NRVO, mark the construction as elidable
// unconditionally.
if (Entity.allowsNRVO())
- CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ CurInit = S.BuildCXXConstructExpr(Loc, Step.Type,
Step.Function.FoundDecl,
Constructor, /*Elidable=*/true,
ConstructorArgs,
@@ -5787,7 +5956,7 @@ PerformConstructorInitialization(Sema &S,
ConstructKind,
ParenOrBraceRange);
else
- CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ CurInit = S.BuildCXXConstructExpr(Loc, Step.Type,
Step.Function.FoundDecl,
Constructor,
ConstructorArgs,
@@ -5826,6 +5995,7 @@ InitializedEntityOutlivesFullExpression(const InitializedEntity &Entity) {
case InitializedEntity::EK_Result:
case InitializedEntity::EK_Exception:
case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Binding:
case InitializedEntity::EK_New:
case InitializedEntity::EK_Base:
case InitializedEntity::EK_Delegating:
@@ -5875,6 +6045,11 @@ static const InitializedEntity *getEntityForTemporaryLifetimeExtension(
// ctor-initializer persists until the constructor exits.
return Entity;
+ case InitializedEntity::EK_Binding:
+ // Per [dcl.decomp]p3, the binding is treated as a variable of reference
+ // type.
+ return Entity;
+
case InitializedEntity::EK_Parameter:
case InitializedEntity::EK_Parameter_CF_Audited:
// -- A temporary bound to a reference parameter in a function call
@@ -5949,10 +6124,7 @@ performReferenceExtension(Expr *Init,
// Step over any subobject adjustments; we may have a materialized
// temporary inside them.
- SmallVector<const Expr *, 2> CommaLHSs;
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- Init = const_cast<Expr *>(
- Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
+ Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
// Per current approach for DR1376, look through casts to reference type
// when performing lifetime extension.
@@ -5960,9 +6132,10 @@ performReferenceExtension(Expr *Init,
if (CE->getSubExpr()->isGLValue())
Init = CE->getSubExpr();
- // FIXME: Per DR1213, subscripting on an array temporary produces an xvalue.
- // It's unclear if binding a reference to that xvalue extends the array
- // temporary.
+ // Per the current approach for DR1299, look through array element access
+ // when performing lifetime extension.
+ if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Init))
+ Init = ASE->getBase();
} while (Init != Old);
if (MaterializeTemporaryExpr *ME = dyn_cast<MaterializeTemporaryExpr>(Init)) {
@@ -5982,10 +6155,7 @@ performReferenceExtension(Expr *Init,
static void performLifetimeExtension(Expr *Init,
const InitializedEntity *ExtendingEntity) {
// Dig out the expression which constructs the extended temporary.
- SmallVector<const Expr *, 2> CommaLHSs;
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- Init = const_cast<Expr *>(
- Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments));
+ Init = const_cast<Expr *>(Init->skipRValueSubobjectAdjustments());
if (CXXBindTemporaryExpr *BTE = dyn_cast<CXXBindTemporaryExpr>(Init))
Init = BTE->getSubExpr();
@@ -6204,6 +6374,24 @@ Sema::CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
return MTE;
}
+ExprResult Sema::TemporaryMaterializationConversion(Expr *E) {
+ // In C++98, we don't want to implicitly create an xvalue.
+ // FIXME: This means that AST consumers need to deal with "prvalues" that
+ // denote materialized temporaries. Maybe we should add another ValueKind
+ // for "xvalue pretending to be a prvalue" for C++98 support.
+ if (!E->isRValue() || !getLangOpts().CPlusPlus11)
+ return E;
+
+ // C++1z [conv.rval]/1: T shall be a complete type.
+ // FIXME: Does this ever matter (can we form a prvalue of incomplete type)?
+ // If so, we should check for a non-abstract class type here too.
+ QualType T = E->getType();
+ if (RequireCompleteType(E->getExprLoc(), T, diag::err_incomplete_type))
+ return ExprError();
+
+ return CreateMaterializeTemporaryExpr(E->getType(), E, false);
+}
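// A minimal sketch of the conversion (hypothetical function f):
//
//   struct S {};
//   S f();
//   S &&r = f();   // in C++11 and later, the prvalue f() is materialized
//                  // into an xvalue temporary before the reference binds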
+
ExprResult
InitializationSequence::Perform(Sema &S,
const InitializedEntity &Entity,
@@ -6250,7 +6438,7 @@ InitializationSequence::Perform(Sema &S,
SourceRange Brackets;
// Scavenge the location of the brackets from the entity, if we can.
- if (DeclaratorDecl *DD = Entity.getDecl()) {
+ if (auto *DD = dyn_cast_or_null<DeclaratorDecl>(Entity.getDecl())) {
if (TypeSourceInfo *TInfo = DD->getTypeSourceInfo()) {
TypeLoc TL = TInfo->getTypeLoc();
if (IncompleteArrayTypeLoc ArrayLoc =
@@ -6302,7 +6490,9 @@ InitializationSequence::Perform(Sema &S,
if (Args.size() == 1 && Args[0]->getType()->isArrayType() &&
Entity.getType()->isPointerType() &&
InitializedEntityOutlivesFullExpression(Entity)) {
- Expr *Init = Args[0];
+ const Expr *Init = Args[0]->skipRValueSubobjectAdjustments();
+ if (auto *MTE = dyn_cast<MaterializeTemporaryExpr>(Init))
+ Init = MTE->GetTemporaryExpr();
Expr::LValueClassification Kind = Init->ClassifyLValue(S.Context);
if (Kind == Expr::LV_ClassTemporary || Kind == Expr::LV_ArrayTemporary)
S.Diag(Init->getLocStart(), diag::warn_temporary_array_to_pointer_decay)
@@ -6318,6 +6508,7 @@ InitializationSequence::Perform(Sema &S,
Entity.getType();
ExprResult CurInit((Expr *)nullptr);
+ SmallVector<Expr*, 4> ArrayLoopCommonExprs;
// For initialization steps that start with a single initializer,
// grab the only argument out the Args and place it into the "current"
@@ -6329,6 +6520,7 @@ InitializationSequence::Perform(Sema &S,
case SK_CastDerivedToBaseLValue:
case SK_BindReference:
case SK_BindReferenceToTemporary:
+ case SK_FinalCopy:
case SK_ExtraneousCopyToTemporary:
case SK_UserConversion:
case SK_QualificationConversionLValue:
@@ -6344,14 +6536,18 @@ InitializationSequence::Perform(Sema &S,
case SK_CAssignment:
case SK_StringInit:
case SK_ObjCObjectConversion:
+ case SK_ArrayLoopIndex:
+ case SK_ArrayLoopInit:
case SK_ArrayInit:
+ case SK_GNUArrayInit:
case SK_ParenthesizedArrayInit:
case SK_PassByIndirectCopyRestore:
case SK_PassByIndirectRestore:
case SK_ProduceObjCObject:
case SK_StdInitializerList:
case SK_OCLSamplerInit:
- case SK_OCLZeroEvent: {
+ case SK_OCLZeroEvent:
+ case SK_OCLZeroQueue: {
assert(Args.size() == 1);
CurInit = Args[0];
if (!CurInit.get()) return ExprError();
@@ -6365,6 +6561,17 @@ InitializationSequence::Perform(Sema &S,
break;
}
+ // C++ [class.abstract]p2:
+ // no objects of an abstract class can be created except as subobjects
+ // of a class derived from it
+ auto checkAbstractType = [&](QualType T) -> bool {
+ if (Entity.getKind() == InitializedEntity::EK_Base ||
+ Entity.getKind() == InitializedEntity::EK_Delegating)
+ return false;
+ return S.RequireNonAbstractType(Kind.getLocation(), T,
+ diag::err_allocation_of_abstract_type);
+ };
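// A minimal sketch of what this rejects (hypothetical class Abstract):
//
//   struct Abstract { virtual void f() = 0; };
//   struct Derived : Abstract { void f() override; };
//   // Abstract a{};   // error: allocating an object of abstract type
//   Derived d{};       // OK: Abstract is initialized only as a base subobject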
+
// Walk through the computed steps for the initialization sequence,
// performing the specified conversions along the way.
bool ConstructorInitRequiresZeroInit = false;
@@ -6416,30 +6623,6 @@ InitializationSequence::Perform(Sema &S,
}
case SK_BindReference:
- // References cannot bind to bit-fields (C++ [dcl.init.ref]p5).
- if (CurInit.get()->refersToBitField()) {
- // We don't necessarily have an unambiguous source bit-field.
- FieldDecl *BitField = CurInit.get()->getSourceBitField();
- S.Diag(Kind.getLocation(), diag::err_reference_bind_to_bitfield)
- << Entity.getType().isVolatileQualified()
- << (BitField ? BitField->getDeclName() : DeclarationName())
- << (BitField != nullptr)
- << CurInit.get()->getSourceRange();
- if (BitField)
- S.Diag(BitField->getLocation(), diag::note_bitfield_decl);
-
- return ExprError();
- }
-
- if (CurInit.get()->refersToVectorElement()) {
- // References cannot bind to vector elements.
- S.Diag(Kind.getLocation(), diag::err_reference_bind_to_vector_element)
- << Entity.getType().isVolatileQualified()
- << CurInit.get()->getSourceRange();
- PrintInitLocationNote(S, Entity);
- return ExprError();
- }
-
// Reference binding does not have any corresponding ASTs.
// Check exception specifications
@@ -6469,15 +6652,15 @@ InitializationSequence::Perform(Sema &S,
// Materialize the temporary into memory.
MaterializeTemporaryExpr *MTE = S.CreateMaterializeTemporaryExpr(
- Entity.getType().getNonReferenceType(), CurInit.get(),
- Entity.getType()->isLValueReferenceType());
+ Step->Type, CurInit.get(), Entity.getType()->isLValueReferenceType());
// Maybe lifetime-extend the temporary's subobjects to match the
// entity's lifetime.
if (const InitializedEntity *ExtendingEntity =
getEntityForTemporaryLifetimeExtension(&Entity))
if (performReferenceExtension(MTE, ExtendingEntity))
- warnOnLifetimeExtension(S, Entity, CurInit.get(), /*IsInitializerList=*/false,
+ warnOnLifetimeExtension(S, Entity, CurInit.get(),
+ /*IsInitializerList=*/false,
ExtendingEntity->getDecl());
// If we're binding to an Objective-C object that has lifetime, we
@@ -6494,6 +6677,21 @@ InitializationSequence::Perform(Sema &S,
break;
}
+ case SK_FinalCopy:
+ if (checkAbstractType(Step->Type))
+ return ExprError();
+
+ // If the overall initialization is initializing a temporary, we already
+ // bound our argument if it was necessary to do so. If not (if we're
+ // ultimately initializing a non-temporary), our argument needs to be
+ // bound since it's initializing a function parameter.
+ // FIXME: This is a mess. Rationalize temporary destruction.
+ if (!shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.get());
+ CurInit = CopyObject(S, Step->Type, Entity, CurInit,
+ /*IsExtraneousCopy=*/false);
+ break;
+
case SK_ExtraneousCopyToTemporary:
CurInit = CopyObject(S, Step->Type, Entity, CurInit,
/*IsExtraneousCopy=*/true);
@@ -6503,7 +6701,6 @@ InitializationSequence::Perform(Sema &S,
// We have a user-defined conversion that invokes either a constructor
// or a conversion function.
CastKind CastKind;
- bool IsCopy = false;
FunctionDecl *Fn = Step->Function.Function;
DeclAccessPair FoundFn = Step->Function.FoundDecl;
bool HadMultipleCandidates = Step->Function.HadMultipleCandidates;
@@ -6512,7 +6709,6 @@ InitializationSequence::Perform(Sema &S,
// Build a call to the selected constructor.
SmallVector<Expr*, 8> ConstructorArgs;
SourceLocation Loc = CurInit.get()->getLocStart();
- CurInit.get(); // Ownership transferred into MultiExprArg, below.
// Determine the arguments required to actually perform the constructor
// call.
@@ -6541,11 +6737,6 @@ InitializationSequence::Perform(Sema &S,
return ExprError();
CastKind = CK_ConstructorConversion;
- QualType Class = S.Context.getTypeDeclType(Constructor->getParent());
- if (S.Context.hasSameUnqualifiedType(SourceType, Class) ||
- S.IsDerivedFrom(Loc, SourceType, Class))
- IsCopy = true;
-
CreatedObject = true;
} else {
// Build a call to the conversion function.
@@ -6558,29 +6749,38 @@ InitializationSequence::Perform(Sema &S,
// FIXME: Should we move this initialization into a separate
// derived-to-base conversion? I believe the answer is "no", because
// we don't want to turn off access control here for c-style casts.
- ExprResult CurInitExprRes =
- S.PerformObjectArgumentInitialization(CurInit.get(),
- /*Qualifier=*/nullptr,
- FoundFn, Conversion);
- if(CurInitExprRes.isInvalid())
+ CurInit = S.PerformObjectArgumentInitialization(CurInit.get(),
+ /*Qualifier=*/nullptr,
+ FoundFn, Conversion);
+ if (CurInit.isInvalid())
return ExprError();
- CurInit = CurInitExprRes;
// Build the actual call to the conversion function.
CurInit = S.BuildCXXMemberCallExpr(CurInit.get(), FoundFn, Conversion,
HadMultipleCandidates);
- if (CurInit.isInvalid() || !CurInit.get())
+ if (CurInit.isInvalid())
return ExprError();
CastKind = CK_UserDefinedConversion;
-
CreatedObject = Conversion->getReturnType()->isRecordType();
}
- bool RequiresCopy = !IsCopy && !isReferenceBinding(Steps.back());
- bool MaybeBindToTemp = RequiresCopy || shouldBindAsTemporary(Entity);
+ if (CreatedObject && checkAbstractType(CurInit.get()->getType()))
+ return ExprError();
+
+ CurInit = ImplicitCastExpr::Create(S.Context, CurInit.get()->getType(),
+ CastKind, CurInit.get(), nullptr,
+ CurInit.get()->getValueKind());
- if (!MaybeBindToTemp && CreatedObject && shouldDestroyTemporary(Entity)) {
+ if (shouldBindAsTemporary(Entity))
+ // The overall entity is temporary, so this expression should be
+ // destroyed at the end of its full-expression.
+ CurInit = S.MaybeBindToTemporary(CurInit.getAs<Expr>());
+ else if (CreatedObject && shouldDestroyEntity(Entity)) {
+ // The object outlasts the full-expression, but we need to prepare for
+ // a destructor being run on it.
+ // FIXME: It makes no sense to do this here. This should happen
+ // regardless of how we initialized the entity.
QualType T = CurInit.get()->getType();
if (const RecordType *Record = T->getAs<RecordType>()) {
CXXDestructorDecl *Destructor
@@ -6592,15 +6792,6 @@ InitializationSequence::Perform(Sema &S,
return ExprError();
}
}
-
- CurInit = ImplicitCastExpr::Create(S.Context, CurInit.get()->getType(),
- CastKind, CurInit.get(), nullptr,
- CurInit.get()->getValueKind());
- if (MaybeBindToTemp)
- CurInit = S.MaybeBindToTemporary(CurInit.getAs<Expr>());
- if (RequiresCopy)
- CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity,
- CurInit, /*IsExtraneousCopy=*/false);
break;
}
@@ -6645,16 +6836,23 @@ InitializationSequence::Perform(Sema &S,
getAssignmentAction(Entity), CCK);
if (CurInitExprRes.isInvalid())
return ExprError();
+
+ S.DiscardMisalignedMemberAddress(Step->Type.getTypePtr(), CurInit.get());
+
CurInit = CurInitExprRes;
if (Step->Kind == SK_ConversionSequenceNoNarrowing &&
- S.getLangOpts().CPlusPlus && !CurInit.get()->isValueDependent())
+ S.getLangOpts().CPlusPlus)
DiagnoseNarrowingInInitList(S, *Step->ICS, SourceType, Entity.getType(),
CurInit.get());
+
break;
}
case SK_ListInitialization: {
+ if (checkAbstractType(Step->Type))
+ return ExprError();
+
InitListExpr *InitList = cast<InitListExpr>(CurInit.get());
// If we're not initializing the top-level entity, we need to create an
// InitializeTemporary entity for our target type.
@@ -6691,6 +6889,9 @@ InitializationSequence::Perform(Sema &S,
}
case SK_ConstructorInitializationFromList: {
+ if (checkAbstractType(Step->Type))
+ return ExprError();
+
// When an initializer list is passed for a parameter of type "reference
// to object", we don't get an EK_Temporary entity, but instead an
// EK_Parameter entity with reference type.
@@ -6734,6 +6935,9 @@ InitializationSequence::Perform(Sema &S,
case SK_ConstructorInitialization:
case SK_StdInitializerListConstructorCall: {
+ if (checkAbstractType(Step->Type))
+ return ExprError();
+
// When an initializer list is passed for a parameter of type "reference
// to object", we don't get an EK_Temporary entity, but instead an
// EK_Parameter entity with reference type.
@@ -6745,13 +6949,15 @@ InitializationSequence::Perform(Sema &S,
bool UseTemporary = Entity.getType()->isReferenceType();
bool IsStdInitListInit =
Step->Kind == SK_StdInitializerListConstructorCall;
+ Expr *Source = CurInit.get();
CurInit = PerformConstructorInitialization(
- S, UseTemporary ? TempEntity : Entity, Kind, Args, *Step,
+ S, UseTemporary ? TempEntity : Entity, Kind,
+ Source ? MultiExprArg(Source) : Args, *Step,
ConstructorInitRequiresZeroInit,
- /*IsListInitialization*/IsStdInitListInit,
- /*IsStdInitListInitialization*/IsStdInitListInit,
- /*LBraceLoc*/SourceLocation(),
- /*RBraceLoc*/SourceLocation());
+ /*IsListInitialization*/ IsStdInitListInit,
+ /*IsStdInitListInitialization*/ IsStdInitListInit,
+ /*LBraceLoc*/ SourceLocation(),
+ /*RBraceLoc*/ SourceLocation());
break;
}
@@ -6830,13 +7036,36 @@ InitializationSequence::Perform(Sema &S,
CurInit.get()->getValueKind());
break;
- case SK_ArrayInit:
+ case SK_ArrayLoopIndex: {
+ Expr *Cur = CurInit.get();
+ Expr *BaseExpr = new (S.Context)
+ OpaqueValueExpr(Cur->getExprLoc(), Cur->getType(),
+ Cur->getValueKind(), Cur->getObjectKind(), Cur);
+ Expr *IndexExpr =
+ new (S.Context) ArrayInitIndexExpr(S.Context.getSizeType());
+ CurInit = S.CreateBuiltinArraySubscriptExpr(
+ BaseExpr, Kind.getLocation(), IndexExpr, Kind.getLocation());
+ ArrayLoopCommonExprs.push_back(BaseExpr);
+ break;
+ }
+
+ case SK_ArrayLoopInit: {
+ assert(!ArrayLoopCommonExprs.empty() &&
+ "mismatched SK_ArrayLoopIndex and SK_ArrayLoopInit");
+ Expr *Common = ArrayLoopCommonExprs.pop_back_val();
+ CurInit = new (S.Context) ArrayInitLoopExpr(Step->Type, Common,
+ CurInit.get());
+ break;
+ }
+
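The two steps above cooperate: SK_ArrayLoopIndex wraps the source array in an OpaqueValueExpr and subscripts it with an ArrayInitIndexExpr, and SK_ArrayLoopInit then wraps the per-element initialization in an ArrayInitLoopExpr. A minimal sketch of the semantics these nodes encode, assuming a four-element array (the function and names are illustrative):

  #include <cstddef>

  void array_loop_demo() {
    int src[4] = {1, 2, 3, 4};
    int dst[4];
    // ArrayInitIndexExpr stands for 'i'; the OpaqueValueExpr holds 'src' as
    // the loop's common subexpression.
    for (std::size_t i = 0; i != 4; ++i)
      dst[i] = src[i];
  }
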
+ case SK_GNUArrayInit:
// Okay: we checked everything before creating this step. Note that
// this is a GNU extension.
S.Diag(Kind.getLocation(), diag::ext_array_init_copy)
<< Step->Type << CurInit.get()->getType()
<< CurInit.get()->getSourceRange();
-
+ LLVM_FALLTHROUGH;
+ case SK_ArrayInit:
// If the destination type is an incomplete array type, update the
// type accordingly.
if (ResultType) {
@@ -6904,19 +7133,93 @@ InitializationSequence::Perform(Sema &S,
}
case SK_OCLSamplerInit: {
- assert(Step->Type->isSamplerT() &&
+ // Sampler initialization has 5 cases:
+ // 1. function argument passing
+ // 1a. argument is a file-scope variable
+ // 1b. argument is a function-scope variable
+ // 1c. argument is one of caller function's parameters
+ // 2. variable initialization
+ // 2a. initializing a file-scope variable
+ // 2b. initializing a function-scope variable
+ //
+ // For file-scope variables, since they cannot be initialized by a call
+ // to __translate_sampler_initializer in LLVM IR, their references need
+ // to be replaced by a cast from their literal initializers to sampler
+ // type. Since sampler variables can only be used as function-call
+ // arguments, we only need to replace them when handling argument
+ // passing.
+ assert(Step->Type->isSamplerT() &&
"Sampler initialization on non-sampler type.");
-
- QualType SourceType = CurInit.get()->getType();
-
+ Expr *Init = CurInit.get();
+ QualType SourceType = Init->getType();
+ // Case 1
if (Entity.isParameterKind()) {
- if (!SourceType->isSamplerT())
+ if (!SourceType->isSamplerT()) {
S.Diag(Kind.getLocation(), diag::err_sampler_argument_required)
<< SourceType;
- } else if (Entity.getKind() != InitializedEntity::EK_Variable) {
- llvm_unreachable("Invalid EntityKind!");
+ break;
+ } else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Init)) {
+ auto Var = cast<VarDecl>(DRE->getDecl());
+ // Cases 1b and 1c
+ // No cast from integer to sampler is needed.
+ if (!Var->hasGlobalStorage()) {
+ CurInit = ImplicitCastExpr::Create(S.Context, Step->Type,
+ CK_LValueToRValue, Init,
+ /*BasePath=*/nullptr, VK_RValue);
+ break;
+ }
+ // Case 1a
+ // For function call with a file-scope sampler variable as argument,
+ // get the integer literal.
+ // Do not diagnose if the file-scope variable does not have an
+ // initializer, since this has already been diagnosed when parsing the
+ // variable declaration.
+ if (!Var->getInit() || !isa<ImplicitCastExpr>(Var->getInit()))
+ break;
+ Init = cast<ImplicitCastExpr>(const_cast<Expr*>(
+ Var->getInit()))->getSubExpr();
+ SourceType = Init->getType();
+ }
+ } else {
+ // Case 2
+ // Check that the initializer is a 32-bit integer constant.
+ // If the initializer is taken from a global variable, do not diagnose,
+ // since this has already been done when parsing the variable declaration.
+ if (!Init->isConstantInitializer(S.Context, false))
+ break;
+
+ if (!SourceType->isIntegerType() ||
+ 32 != S.Context.getIntWidth(SourceType)) {
+ S.Diag(Kind.getLocation(), diag::err_sampler_initializer_not_integer)
+ << SourceType;
+ break;
+ }
+
+ llvm::APSInt Result;
+ Init->EvaluateAsInt(Result, S.Context);
+ const uint64_t SamplerValue = Result.getLimitedValue();
+ // The 32-bit value of the sampler's initializer is interpreted as a
+ // bit-field with the following structure:
+ // |unspecified|Filter|Addressing Mode| Normalized Coords|
+ // |31 6|5 4|3 1| 0|
+ // This structure corresponds to enum values of sampler properties
+ // defined in SPIR spec v1.2 and also opencl-c.h
+ unsigned AddressingMode = (0x0E & SamplerValue) >> 1;
+ unsigned FilterMode = (0x30 & SamplerValue) >> 4;
+ if (FilterMode != 1 && FilterMode != 2)
+ S.Diag(Kind.getLocation(),
+ diag::warn_sampler_initializer_invalid_bits)
+ << "Filter Mode";
+ if (AddressingMode > 4)
+ S.Diag(Kind.getLocation(),
+ diag::warn_sampler_initializer_invalid_bits)
+ << "Addressing Mode";
}
+ // Cases 1a, 2a and 2b
+ // Insert cast from integer to sampler.
+ CurInit = S.ImpCastExprToType(Init, S.Context.OCLSamplerTy,
+ CK_IntToOCLSampler);
break;
}
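
A sketch of the bit decoding performed above, assuming the SPIR v1.2 layout quoted in the comment (the function name and return value are illustrative):

  unsigned decode_sampler_demo(unsigned SamplerValue) {
    unsigned NormalizedCoords = SamplerValue & 0x01;         // bit 0
    unsigned AddressingMode   = (SamplerValue & 0x0E) >> 1;  // bits 3..1
    unsigned FilterMode       = (SamplerValue & 0x30) >> 4;  // bits 5..4
    // The code above warns when FilterMode is neither 1 nor 2, or when
    // AddressingMode is greater than 4.
    return NormalizedCoords | (AddressingMode << 1) | (FilterMode << 4);
  }
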
case SK_OCLZeroEvent: {
@@ -6928,6 +7231,15 @@ InitializationSequence::Perform(Sema &S,
CurInit.get()->getValueKind());
break;
}
+ case SK_OCLZeroQueue: {
+ assert(Step->Type->isQueueT() &&
+ "Event initialization on non queue type.");
+
+ CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
+ CK_ZeroToOCLQueue,
+ CurInit.get()->getValueKind());
+ break;
+ }
}
}
@@ -7190,6 +7502,25 @@ bool InitializationSequence::Diagnose(Sema &S,
<< Args[0]->getSourceRange();
break;
+ case FK_NonConstLValueReferenceBindingToBitfield: {
+ // We don't necessarily have an unambiguous source bit-field.
+ FieldDecl *BitField = Args[0]->getSourceBitField();
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_bitfield)
+ << DestType.isVolatileQualified()
+ << (BitField ? BitField->getDeclName() : DeclarationName())
+ << (BitField != nullptr)
+ << Args[0]->getSourceRange();
+ if (BitField)
+ S.Diag(BitField->getLocation(), diag::note_bitfield_decl);
+ break;
+ }
+
+ case FK_NonConstLValueReferenceBindingToVectorElement:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_vector_element)
+ << DestType.isVolatileQualified()
+ << Args[0]->getSourceRange();
+ break;
+
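Minimal C++ sketches of code that reaches these two new failure kinds (the declarations are illustrative):

  struct S { int bf : 3; };
  typedef float float4 __attribute__((ext_vector_type(4)));

  void bind_demo(S s, float4 v) {
    int &r1 = s.bf;         // error: non-const reference cannot bind to bit-field 'bf'
    float &r2 = v.x;        // error: non-const reference cannot bind to vector element
    const int &ok1 = s.bf;  // OK: binds to a temporary holding the field's value
    const float &ok2 = v.x; // OK likewise
  }
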
case FK_RValueReferenceBindingToLValue:
S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref)
<< DestType.getNonReferenceType() << Args[0]->getType()
@@ -7487,6 +7818,14 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "non-const lvalue reference bound to temporary";
break;
+ case FK_NonConstLValueReferenceBindingToBitfield:
+ OS << "non-const lvalue reference bound to bit-field";
+ break;
+
+ case FK_NonConstLValueReferenceBindingToVectorElement:
+ OS << "non-const lvalue reference bound to vector element";
+ break;
+
case FK_NonConstLValueReferenceBindingToUnrelated:
OS << "non-const lvalue reference bound to unrelated type";
break;
@@ -7583,15 +7922,15 @@ void InitializationSequence::dump(raw_ostream &OS) const {
break;
case SK_CastDerivedToBaseRValue:
- OS << "derived-to-base case (rvalue" << S->Type.getAsString() << ")";
+ OS << "derived-to-base (rvalue)";
break;
case SK_CastDerivedToBaseXValue:
- OS << "derived-to-base case (xvalue" << S->Type.getAsString() << ")";
+ OS << "derived-to-base (xvalue)";
break;
case SK_CastDerivedToBaseLValue:
- OS << "derived-to-base case (lvalue" << S->Type.getAsString() << ")";
+ OS << "derived-to-base (lvalue)";
break;
case SK_BindReference:
@@ -7602,6 +7941,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "bind reference to a temporary";
break;
+ case SK_FinalCopy:
+ OS << "final copy in class direct-initialization";
+ break;
+
case SK_ExtraneousCopyToTemporary:
OS << "extraneous C++03 copy to temporary";
break;
@@ -7678,10 +8021,22 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "Objective-C object conversion";
break;
+ case SK_ArrayLoopIndex:
+ OS << "indexing for array initialization loop";
+ break;
+
+ case SK_ArrayLoopInit:
+ OS << "array initialization loop";
+ break;
+
case SK_ArrayInit:
OS << "array initialization";
break;
+ case SK_GNUArrayInit:
+ OS << "array initialization (GNU extension)";
+ break;
+
case SK_ParenthesizedArrayInit:
OS << "parenthesized array initialization";
break;
@@ -7713,6 +8068,10 @@ void InitializationSequence::dump(raw_ostream &OS) const {
case SK_OCLZeroEvent:
OS << "OpenCL event_t from zero";
break;
+
+ case SK_OCLZeroQueue:
+ OS << "OpenCL queue_t from zero";
+ break;
}
OS << " [" << S->Type.getAsString() << ']';
@@ -7750,6 +8109,7 @@ static void DiagnoseNarrowingInInitList(Sema &S,
switch (SCS->getNarrowingKind(S.Context, PostInit, ConstantValue,
ConstantType)) {
case NK_Not_Narrowing:
+ case NK_Dependent_Narrowing:
// No narrowing occurred.
return;
diff --git a/lib/Sema/SemaLambda.cpp b/lib/Sema/SemaLambda.cpp
index 0b3af262cd61..3bae69164ffd 100644
--- a/lib/Sema/SemaLambda.cpp
+++ b/lib/Sema/SemaLambda.cpp
@@ -238,7 +238,7 @@ getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) {
/*Template kw loc*/ SourceLocation(), LAngleLoc,
llvm::makeArrayRef((NamedDecl *const *)LSI->AutoTemplateParams.data(),
LSI->AutoTemplateParams.size()),
- RAngleLoc);
+ RAngleLoc, nullptr);
}
return LSI->GLTemplateParameterList;
}
@@ -361,7 +361,8 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodTypeInfo,
SourceLocation EndLoc,
- ArrayRef<ParmVarDecl *> Params) {
+ ArrayRef<ParmVarDecl *> Params,
+ const bool IsConstexprSpecified) {
QualType MethodType = MethodTypeInfo->getType();
TemplateParameterList *TemplateParams =
getGenericLambdaTemplateParameterList(getCurLambda(), *this);
@@ -398,7 +399,7 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
MethodType, MethodTypeInfo,
SC_None,
/*isInline=*/true,
- /*isConstExpr=*/false,
+ IsConstexprSpecified,
EndLoc);
Method->setAccess(AS_public);
@@ -883,14 +884,20 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo,
KnownDependent, Intro.Default);
- CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
- MethodTyInfo, EndLoc, Params);
+ CXXMethodDecl *Method =
+ startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params,
+ ParamInfo.getDeclSpec().isConstexprSpecified());
if (ExplicitParams)
CheckCXXDefaultArguments(Method);
// Attributes on the lambda apply to the method.
ProcessDeclAttributes(CurScope, Method, ParamInfo);
-
+
+ // CUDA lambdas get implicit attributes based on the scope in which they're
+ // declared.
+ if (getLangOpts().CUDA)
+ CUDASetLambdaAttrs(Method);
+
// Introduce the function call operator as the current declaration context.
PushDeclContext(CurScope, Method);
@@ -1148,14 +1155,16 @@ void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
/// \brief Add a lambda's conversion to function pointer, as described in
/// C++11 [expr.prim.lambda]p6.
-static void addFunctionPointerConversion(Sema &S,
+static void addFunctionPointerConversion(Sema &S,
SourceRange IntroducerRange,
CXXRecordDecl *Class,
CXXMethodDecl *CallOperator) {
// This conversion is explicitly disabled if the lambda's function has
// pass_object_size attributes on any of its parameters.
- if (llvm::any_of(CallOperator->parameters(),
- std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>)))
+ auto HasPassObjectSizeAttr = [](const ParmVarDecl *P) {
+ return P->hasAttr<PassObjectSizeAttr>();
+ };
+ if (llvm::any_of(CallOperator->parameters(), HasPassObjectSizeAttr))
return;
// Add the conversion to function pointer.
@@ -1375,10 +1384,7 @@ static void addBlockPointerConversion(Sema &S,
}
static ExprResult performLambdaVarCaptureInitialization(
- Sema &S, LambdaScopeInfo::Capture &Capture,
- FieldDecl *Field,
- SmallVectorImpl<VarDecl *> &ArrayIndexVars,
- SmallVectorImpl<unsigned> &ArrayIndexStarts) {
+ Sema &S, LambdaScopeInfo::Capture &Capture, FieldDecl *Field) {
assert(Capture.isVariableCapture() && "not a variable capture");
auto *Var = Capture.getVariable();
@@ -1402,69 +1408,11 @@ static ExprResult performLambdaVarCaptureInitialization(
return ExprError();
Expr *Ref = RefResult.get();
- QualType FieldType = Field->getType();
-
- // When the variable has array type, create index variables for each
- // dimension of the array. We use these index variables to subscript
- // the source array, and other clients (e.g., CodeGen) will perform
- // the necessary iteration with these index variables.
- //
- // FIXME: This is dumb. Add a proper AST representation for array
- // copy-construction and use it here.
- SmallVector<VarDecl *, 4> IndexVariables;
- QualType BaseType = FieldType;
- QualType SizeType = S.Context.getSizeType();
- ArrayIndexStarts.push_back(ArrayIndexVars.size());
- while (const ConstantArrayType *Array
- = S.Context.getAsConstantArrayType(BaseType)) {
- // Create the iteration variable for this array index.
- IdentifierInfo *IterationVarName = nullptr;
- {
- SmallString<8> Str;
- llvm::raw_svector_ostream OS(Str);
- OS << "__i" << IndexVariables.size();
- IterationVarName = &S.Context.Idents.get(OS.str());
- }
- VarDecl *IterationVar = VarDecl::Create(
- S.Context, S.CurContext, Loc, Loc, IterationVarName, SizeType,
- S.Context.getTrivialTypeSourceInfo(SizeType, Loc), SC_None);
- IterationVar->setImplicit();
- IndexVariables.push_back(IterationVar);
- ArrayIndexVars.push_back(IterationVar);
-
- // Create a reference to the iteration variable.
- ExprResult IterationVarRef =
- S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
- assert(!IterationVarRef.isInvalid() &&
- "Reference to invented variable cannot fail!");
- IterationVarRef = S.DefaultLvalueConversion(IterationVarRef.get());
- assert(!IterationVarRef.isInvalid() &&
- "Conversion of invented variable cannot fail!");
-
- // Subscript the array with this iteration variable.
- ExprResult Subscript =
- S.CreateBuiltinArraySubscriptExpr(Ref, Loc, IterationVarRef.get(), Loc);
- if (Subscript.isInvalid())
- return ExprError();
-
- Ref = Subscript.get();
- BaseType = Array->getElementType();
- }
-
- // Construct the entity that we will be initializing. For an array, this
- // will be first element in the array, which may require several levels
- // of array-subscript entities.
- SmallVector<InitializedEntity, 4> Entities;
- Entities.reserve(1 + IndexVariables.size());
- Entities.push_back(InitializedEntity::InitializeLambdaCapture(
- Var->getIdentifier(), FieldType, Loc));
- for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
- Entities.push_back(
- InitializedEntity::InitializeElement(S.Context, 0, Entities.back()));
-
+ auto Entity = InitializedEntity::InitializeLambdaCapture(
+ Var->getIdentifier(), Field->getType(), Loc);
InitializationKind InitKind = InitializationKind::CreateDirect(Loc, Loc, Loc);
- InitializationSequence Init(S, Entities.back(), InitKind, Ref);
- return Init.Perform(S, Entities.back(), InitKind, Ref);
+ InitializationSequence Init(S, Entity, InitKind, Ref);
+ return Init.Perform(S, Entity, InitKind, Ref);
}
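
With the per-dimension index variables gone, by-copy capture of an array is represented with a single ArrayInitLoopExpr built via the simplified function above. A minimal sketch of the construct this path handles (names are illustrative):

  int capture_demo() {
    int arr[3] = {1, 2, 3};
    auto f = [arr] { return arr[0] + arr[1] + arr[2]; };
    return f();  // initializing the closure's copy of 'arr' uses this path
  }
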
ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
@@ -1505,8 +1453,6 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
bool ExplicitResultType;
CleanupInfo LambdaCleanup;
bool ContainsUnexpandedParameterPack;
- SmallVector<VarDecl *, 4> ArrayIndexVars;
- SmallVector<unsigned, 4> ArrayIndexStarts;
{
CallOperator = LSI->CallOperator;
Class = LSI->Lambda;
@@ -1540,14 +1486,12 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
LambdaCapture(From.getLocation(), IsImplicit,
From.isCopyCapture() ? LCK_StarThis : LCK_This));
CaptureInits.push_back(From.getInitExpr());
- ArrayIndexStarts.push_back(ArrayIndexVars.size());
continue;
}
if (From.isVLATypeCapture()) {
Captures.push_back(
LambdaCapture(From.getLocation(), IsImplicit, LCK_VLAType));
CaptureInits.push_back(nullptr);
- ArrayIndexStarts.push_back(ArrayIndexVars.size());
continue;
}
@@ -1557,13 +1501,11 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
Var, From.getEllipsisLoc()));
Expr *Init = From.getInitExpr();
if (!Init) {
- auto InitResult = performLambdaVarCaptureInitialization(
- *this, From, *CurField, ArrayIndexVars, ArrayIndexStarts);
+ auto InitResult =
+ performLambdaVarCaptureInitialization(*this, From, *CurField);
if (InitResult.isInvalid())
return ExprError();
Init = InitResult.get();
- } else {
- ArrayIndexStarts.push_back(ArrayIndexVars.size());
}
CaptureInits.push_back(Init);
}
@@ -1600,9 +1542,22 @@ ExprResult Sema::BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
CaptureDefault, CaptureDefaultLoc,
Captures,
ExplicitParams, ExplicitResultType,
- CaptureInits, ArrayIndexVars,
- ArrayIndexStarts, EndLoc,
+ CaptureInits, EndLoc,
ContainsUnexpandedParameterPack);
+ // If the lambda expression's call operator is not explicitly marked constexpr
+ // and we are not in a dependent context, analyze the call operator to infer
+ // its constexpr-ness, suppressing diagnostics while doing so.
+ if (getLangOpts().CPlusPlus1z && !CallOperator->isInvalidDecl() &&
+ !CallOperator->isConstexpr() &&
+ !Class->getDeclContext()->isDependentContext()) {
+ TentativeAnalysisScope DiagnosticScopeGuard(*this);
+ CallOperator->setConstexpr(
+ CheckConstexprFunctionDecl(CallOperator) &&
+ CheckConstexprFunctionBody(CallOperator, CallOperator->getBody()));
+ }
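+
+ // A minimal sketch of the C++1z behavior this enables, assuming
+ // -std=c++1z (the names are illustrative):
+ //
+ //   auto sq = [](int n) { return n * n; };
+ //   static_assert(sq(3) == 9, "call operator is implicitly constexpr");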
+
+ // Emit delayed shadowing warnings now that the full capture list is known.
+ DiagnoseShadowingLambdaDecls(LSI);
if (!CurContext->isDependentContext()) {
switch (ExprEvalContexts.back().Context) {
diff --git a/lib/Sema/SemaLookup.cpp b/lib/Sema/SemaLookup.cpp
index e2550824fb69..38a7b8c127cc 100644
--- a/lib/Sema/SemaLookup.cpp
+++ b/lib/Sema/SemaLookup.cpp
@@ -12,9 +12,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Lookup.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -29,6 +27,7 @@
#include "clang/Lex/ModuleLoader.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Lookup.h"
#include "clang/Sema/Overload.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
@@ -37,17 +36,13 @@
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TypoCorrection.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
-#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/edit_distance.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <iterator>
-#include <limits>
#include <list>
-#include <map>
#include <set>
#include <utility>
#include <vector>
@@ -455,15 +450,18 @@ static bool canHideTag(NamedDecl *D) {
// Given a set of declarations in a single declarative region [...]
// exactly one declaration shall declare a class name or enumeration name
// that is not a typedef name and the other declarations shall all refer to
- // the same variable or enumerator, or all refer to functions and function
- // templates; in this case the class name or enumeration name is hidden.
+ // the same variable, non-static data member, or enumerator, or all refer
+ // to functions and function templates; in this case the class name or
+ // enumeration name is hidden.
// C++ [basic.scope.hiding]p2:
// A class name or enumeration name can be hidden by the name of a
// variable, data member, function, or enumerator declared in the same
// scope.
+ // An UnresolvedUsingValueDecl always instantiates to one of these.
D = D->getUnderlyingDecl();
return isa<VarDecl>(D) || isa<EnumConstantDecl>(D) || isa<FunctionDecl>(D) ||
- isa<FunctionTemplateDecl>(D) || isa<FieldDecl>(D);
+ isa<FunctionTemplateDecl>(D) || isa<FieldDecl>(D) ||
+ isa<UnresolvedUsingValueDecl>(D);
}
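
For reference, the classic hiding pattern this function recognizes, now extended to dependent using-declarations since an UnresolvedUsingValueDecl always instantiates to one of the permitted kinds (sketch; names are illustrative):

  struct X {};   // the tag
  int X = 0;     // hides the tag in the same scope
  struct X x;    // OK: an elaborated-type-specifier still finds the tag
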
/// Resolves the result kind of this lookup.
@@ -1298,7 +1296,7 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
// If we have a context, and it's not a context stashed in the
// template parameter scope for an out-of-line definition, also
// look into that context.
- if (!(Found && S && S->isTemplateParamScope())) {
+ if (!(Found && S->isTemplateParamScope())) {
assert(Ctx->isFileContext() &&
"We should have been looking only at file context here already.");
@@ -1372,8 +1370,9 @@ Module *Sema::getOwningModule(Decl *Entity) {
auto &SrcMgr = PP.getSourceManager();
SourceLocation StartLoc =
SrcMgr.getLocForStartOfFile(SrcMgr.getMainFileID());
- auto &TopLevel =
- VisibleModulesStack.empty() ? VisibleModules : VisibleModulesStack[0];
+ auto &TopLevel = ModuleScopes.empty()
+ ? VisibleModules
+ : ModuleScopes[0].OuterVisibleModules;
TopLevel.setVisible(CachedFakeTopLevelModule, StartLoc);
}
@@ -1542,12 +1541,17 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
// If this declaration is not at namespace scope nor module-private,
// then it is visible if its lexical parent has a visible definition.
DeclContext *DC = D->getLexicalDeclContext();
- if (!D->isModulePrivate() &&
- DC && !DC->isFileContext() && !isa<LinkageSpecDecl>(DC)) {
+ if (!D->isModulePrivate() && DC && !DC->isFileContext() &&
+ !isa<LinkageSpecDecl>(DC) && !isa<ExportDecl>(DC)) {
// For a parameter, check whether our current template declaration's
// lexical context is visible, not whether there's some other visible
// definition of it, because parameters aren't "within" the definition.
- if ((D->isTemplateParameter() || isa<ParmVarDecl>(D))
+ //
+ // In C++ we need to check for a visible definition due to ODR merging,
+ // and in C we must not because each declaration of a function gets its own
+ // set of declarations for tags in prototype scope.
+ if ((D->isTemplateParameter() || isa<ParmVarDecl>(D)
+ || (isa<FunctionDecl>(DC) && !SemaRef.getLangOpts().CPlusPlus))
? isVisible(SemaRef, cast<NamedDecl>(DC))
: SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC))) {
if (SemaRef.ActiveTemplateInstantiations.empty() &&
@@ -5081,6 +5085,10 @@ void Sema::diagnoseTypo(const TypoCorrection &Correction,
if (PrevNote.getDiagID() && ChosenDecl)
Diag(ChosenDecl->getLocation(), PrevNote)
<< CorrectedQuotedStr << (ErrorRecovery ? FixItHint() : FixTypo);
+
+ // Add any extra diagnostics.
+ for (const PartialDiagnostic &PD : Correction.getExtraDiagnostics())
+ Diag(Correction.getCorrectionRange().getBegin(), PD);
}
TypoExpr *Sema::createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
diff --git a/lib/Sema/SemaObjCProperty.cpp b/lib/Sema/SemaObjCProperty.cpp
index 5e38751f44a5..3481b82679c2 100644
--- a/lib/Sema/SemaObjCProperty.cpp
+++ b/lib/Sema/SemaObjCProperty.cpp
@@ -753,7 +753,7 @@ static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
break;
case Qualifiers::OCL_Weak:
- S.Diag(ivar->getLocation(), diag::error_weak_property)
+ S.Diag(ivar->getLocation(), diag::err_weak_property)
<< property->getDeclName()
<< ivar->getDeclName();
break;
@@ -904,7 +904,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
dyn_cast<ObjCContainerDecl>(CurContext);
// Make sure we have a context for the property implementation declaration.
if (!ClassImpDecl) {
- Diag(AtLoc, diag::error_missing_property_context);
+ Diag(AtLoc, diag::err_missing_property_context);
return nullptr;
}
if (PropertyIvarLoc.isInvalid())
@@ -928,11 +928,11 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// Look for this property declaration in the @implementation's @interface
property = IDecl->FindPropertyDeclaration(PropertyId, QueryKind);
if (!property) {
- Diag(PropertyLoc, diag::error_bad_property_decl) << IDecl->getDeclName();
+ Diag(PropertyLoc, diag::err_bad_property_decl) << IDecl->getDeclName();
return nullptr;
}
if (property->isClassProperty() && Synthesize) {
- Diag(PropertyLoc, diag::error_synthesize_on_class_property) << PropertyId;
+ Diag(PropertyLoc, diag::err_synthesize_on_class_property) << PropertyId;
return nullptr;
}
unsigned PIkind = property->getPropertyAttributesAsWritten();
@@ -948,7 +948,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (const ObjCCategoryDecl *CD =
dyn_cast<ObjCCategoryDecl>(property->getDeclContext())) {
if (!CD->IsClassExtension()) {
- Diag(PropertyLoc, diag::error_category_property) << CD->getDeclName();
+ Diag(PropertyLoc, diag::err_category_property) << CD->getDeclName();
Diag(property->getLocation(), diag::note_property_declare);
return nullptr;
}
@@ -992,12 +992,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
} else if ((CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassImpDecl))) {
if (Synthesize) {
- Diag(AtLoc, diag::error_synthesize_category_decl);
+ Diag(AtLoc, diag::err_synthesize_category_decl);
return nullptr;
}
IDecl = CatImplClass->getClassInterface();
if (!IDecl) {
- Diag(AtLoc, diag::error_missing_property_interface);
+ Diag(AtLoc, diag::err_missing_property_interface);
return nullptr;
}
ObjCCategoryDecl *Category =
@@ -1010,12 +1010,12 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// Look for this property declaration in @implementation's category
property = Category->FindPropertyDeclaration(PropertyId, QueryKind);
if (!property) {
- Diag(PropertyLoc, diag::error_bad_category_property_decl)
+ Diag(PropertyLoc, diag::err_bad_category_property_decl)
<< Category->getDeclName();
return nullptr;
}
} else {
- Diag(AtLoc, diag::error_bad_property_context);
+ Diag(AtLoc, diag::err_bad_property_context);
return nullptr;
}
ObjCIvarDecl *Ivar = nullptr;
@@ -1146,20 +1146,22 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
diag::err_abstract_type_in_decl,
AbstractSynthesizedIvarType)) {
Diag(property->getLocation(), diag::note_property_declare);
+ // An abstract type is as bad as an incomplete type.
+ CompleteTypeErr = true;
+ }
+ if (CompleteTypeErr)
Ivar->setInvalidDecl();
- } else if (CompleteTypeErr)
- Ivar->setInvalidDecl();
ClassImpDecl->addDecl(Ivar);
IDecl->makeDeclVisibleInContext(Ivar);
if (getLangOpts().ObjCRuntime.isFragile())
- Diag(PropertyDiagLoc, diag::error_missing_property_ivar_decl)
+ Diag(PropertyDiagLoc, diag::err_missing_property_ivar_decl)
<< PropertyId;
// Note! I deliberately want it to fall thru so we have a
// property implementation and avoid future warnings.
} else if (getLangOpts().ObjCRuntime.isNonFragile() &&
!declaresSameEntity(ClassDeclared, IDecl)) {
- Diag(PropertyDiagLoc, diag::error_ivar_in_superclass_use)
+ Diag(PropertyDiagLoc, diag::err_ivar_in_superclass_use)
<< property->getDeclName() << Ivar->getDeclName()
<< ClassDeclared->getDeclName();
Diag(Ivar->getLocation(), diag::note_previous_access_declaration)
@@ -1184,7 +1186,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
== Compatible);
}
if (!compat) {
- Diag(PropertyDiagLoc, diag::error_property_ivar_type)
+ Diag(PropertyDiagLoc, diag::err_property_ivar_type)
<< property->getDeclName() << PropType
<< Ivar->getDeclName() << IvarType;
Diag(Ivar->getLocation(), diag::note_ivar_decl);
@@ -1199,7 +1201,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
QualType rhsType =Context.getCanonicalType(IvarType).getUnqualifiedType();
if (lhsType != rhsType &&
lhsType->isArithmeticType()) {
- Diag(PropertyDiagLoc, diag::error_property_ivar_type)
+ Diag(PropertyDiagLoc, diag::err_property_ivar_type)
<< property->getDeclName() << PropType
<< Ivar->getDeclName() << IvarType;
Diag(Ivar->getLocation(), diag::note_ivar_decl);
@@ -1209,7 +1211,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// __weak is explicit. So it works on Canonical type.
if ((PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
getLangOpts().getGC() != LangOptions::NonGC)) {
- Diag(PropertyDiagLoc, diag::error_weak_property)
+ Diag(PropertyDiagLoc, diag::err_weak_property)
<< property->getDeclName() << Ivar->getDeclName();
Diag(Ivar->getLocation(), diag::note_ivar_decl);
// Fall thru - see previous comment
@@ -1218,7 +1220,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if ((property->getType()->isObjCObjectPointerType() ||
PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() &&
getLangOpts().getGC() != LangOptions::NonGC) {
- Diag(PropertyDiagLoc, diag::error_strong_property)
+ Diag(PropertyDiagLoc, diag::err_strong_property)
<< property->getDeclName() << Ivar->getDeclName();
// Fall thru - see previous comment
}
@@ -1228,7 +1230,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
} else if (PropertyIvar)
// @dynamic
- Diag(PropertyDiagLoc, diag::error_dynamic_property_ivar_decl);
+ Diag(PropertyDiagLoc, diag::err_dynamic_property_ivar_decl);
assert (property && "ActOnPropertyImplDecl - property declaration missing");
ObjCPropertyImplDecl *PIDecl =
@@ -1348,7 +1350,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (Synthesize)
if (ObjCPropertyImplDecl *PPIDecl =
IC->FindPropertyImplIvarDecl(PropertyIvar)) {
- Diag(PropertyLoc, diag::error_duplicate_ivar_use)
+ Diag(PropertyLoc, diag::err_duplicate_ivar_use)
<< PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
<< PropertyIvar;
Diag(PPIDecl->getLocation(), diag::note_previous_use);
@@ -1356,7 +1358,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (ObjCPropertyImplDecl *PPIDecl
= IC->FindPropertyImplDecl(PropertyId, QueryKind)) {
- Diag(PropertyLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PropertyLoc, diag::err_property_implemented) << PropertyId;
Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
return nullptr;
}
@@ -1387,7 +1389,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (Synthesize)
if (ObjCPropertyImplDecl *PPIDecl =
CatImplClass->FindPropertyImplIvarDecl(PropertyIvar)) {
- Diag(PropertyDiagLoc, diag::error_duplicate_ivar_use)
+ Diag(PropertyDiagLoc, diag::err_duplicate_ivar_use)
<< PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
<< PropertyIvar;
Diag(PPIDecl->getLocation(), diag::note_previous_use);
@@ -1395,7 +1397,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
if (ObjCPropertyImplDecl *PPIDecl =
CatImplClass->FindPropertyImplDecl(PropertyId, QueryKind)) {
- Diag(PropertyDiagLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PropertyDiagLoc, diag::err_property_implemented) << PropertyId;
Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
return nullptr;
}
@@ -1505,7 +1507,7 @@ bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
compat = Context.canAssignObjCInterfaces(getterObjCPtr, propertyObjCPtr);
else if (CheckAssignmentConstraints(Loc, GetterType, PropertyRValueType)
!= Compatible) {
- Diag(Loc, diag::error_property_accessor_type)
+ Diag(Loc, diag::err_property_accessor_type)
<< property->getDeclName() << PropertyRValueType
<< GetterMethod->getSelector() << GetterType;
Diag(GetterMethod->getLocation(), diag::note_declared_at);
diff --git a/lib/Sema/SemaOpenMP.cpp b/lib/Sema/SemaOpenMP.cpp
index b7ac48583e1a..804aadc0ff77 100644
--- a/lib/Sema/SemaOpenMP.cpp
+++ b/lib/Sema/SemaOpenMP.cpp
@@ -72,8 +72,13 @@ private:
typedef llvm::DenseMap<ValueDecl *, Expr *> AlignedMapTy;
typedef std::pair<unsigned, VarDecl *> LCDeclInfo;
typedef llvm::DenseMap<ValueDecl *, LCDeclInfo> LoopControlVariablesMapTy;
- typedef llvm::DenseMap<
- ValueDecl *, OMPClauseMappableExprCommon::MappableExprComponentLists>
+ /// Struct that associates mappable expression components with the
+ /// clause kind where they were found.
+ struct MappedExprComponentTy {
+ OMPClauseMappableExprCommon::MappableExprComponentLists Components;
+ OpenMPClauseKind Kind = OMPC_unknown;
+ };
+ typedef llvm::DenseMap<ValueDecl *, MappedExprComponentTy>
MappedExprComponentsTy;
typedef llvm::StringMap<std::pair<OMPCriticalDirective *, llvm::APSInt>>
CriticalsWithHintsTy;
@@ -123,7 +128,7 @@ private:
typedef SmallVector<SharingMapTy, 8>::reverse_iterator reverse_iterator;
- DSAVarData getDSA(StackTy::reverse_iterator& Iter, ValueDecl *D);
+ DSAVarData getDSA(StackTy::reverse_iterator &Iter, ValueDecl *D);
/// \brief Checks if the variable is local to an OpenMP region.
bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter);
@@ -293,9 +298,7 @@ public:
Stack[Stack.size() - 2].CancelRegion || Cancel;
}
/// \brief Return true if current region has inner cancel construct.
- bool isCancelRegion() const {
- return Stack.back().CancelRegion;
- }
+ bool isCancelRegion() const { return Stack.back().CancelRegion; }
/// \brief Set collapse value for the region.
void setAssociatedLoops(unsigned Val) { Stack.back().AssociatedLoops = Val; }
@@ -323,12 +326,13 @@ public:
Scope *getCurScope() { return Stack.back().CurScope; }
SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; }
- // Do the check specified in \a Check to all component lists and return true
- // if any issue is found.
+ /// Apply the check specified in \a Check to all component lists and
+ /// return true if any issue is found.
bool checkMappableExprComponentListsForDecl(
ValueDecl *VD, bool CurrentRegionOnly,
- const llvm::function_ref<bool(
- OMPClauseMappableExprCommon::MappableExprComponentListRef)> &Check) {
+ const llvm::function_ref<
+ bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind)> &Check) {
auto SI = Stack.rbegin();
auto SE = Stack.rend();
@@ -344,24 +348,26 @@ public:
for (; SI != SE; ++SI) {
auto MI = SI->MappedExprComponents.find(VD);
if (MI != SI->MappedExprComponents.end())
- for (auto &L : MI->second)
- if (Check(L))
+ for (auto &L : MI->second.Components)
+ if (Check(L, MI->second.Kind))
return true;
}
return false;
}
- // Create a new mappable expression component list associated with a given
- // declaration and initialize it with the provided list of components.
+ /// Create a new mappable expression component list associated with a given
+ /// declaration and initialize it with the provided list of components.
void addMappableExpressionComponents(
ValueDecl *VD,
- OMPClauseMappableExprCommon::MappableExprComponentListRef Components) {
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
+ OpenMPClauseKind WhereFoundClauseKind) {
assert(Stack.size() > 1 &&
"Not expecting to retrieve components from a empty stack!");
auto &MEC = Stack.back().MappedExprComponents[VD];
// Create new entry and append the new components there.
- MEC.resize(MEC.size() + 1);
- MEC.back().append(Components.begin(), Components.end());
+ MEC.Components.resize(MEC.Components.size() + 1);
+ MEC.Components.back().append(Components.begin(), Components.end());
+ MEC.Kind = WhereFoundClauseKind;
}
unsigned getNestingLevel() const {
@@ -393,7 +399,7 @@ bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
static ValueDecl *getCanonicalDecl(ValueDecl *D) {
auto *VD = dyn_cast<VarDecl>(D);
auto *FD = dyn_cast<FieldDecl>(D);
- if (VD != nullptr) {
+ if (VD != nullptr) {
VD = VD->getCanonicalDecl();
D = VD;
} else {
@@ -404,7 +410,7 @@ static ValueDecl *getCanonicalDecl(ValueDecl *D) {
return D;
}
-DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator& Iter,
+DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
ValueDecl *D) {
D = getCanonicalDecl(D);
auto *VD = dyn_cast<VarDecl>(D);
@@ -771,18 +777,12 @@ DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
D = getCanonicalDecl(D);
auto StartI = std::next(Stack.rbegin());
auto EndI = Stack.rend();
- if (FromParent && StartI != EndI) {
+ if (FromParent && StartI != EndI)
StartI = std::next(StartI);
- }
- for (auto I = StartI, EE = EndI; I != EE; ++I) {
- if (!DPred(I->Directive))
- break;
- DSAVarData DVar = getDSA(I, D);
- if (CPred(DVar.CKind))
- return DVar;
+ if (StartI == EndI || !DPred(StartI->Directive))
return DSAVarData();
- }
- return DSAVarData();
+ DSAVarData DVar = getDSA(StartI, D);
+ return CPred(DVar.CKind) ? DVar : DSAVarData();
}
bool DSAStackTy::hasExplicitDSA(
@@ -903,7 +903,6 @@ bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
// array section, the runtime library may pass the NULL value to the
// device instead of the value passed to it by the compiler.
-
if (Ty->isReferenceType())
Ty = Ty->castAs<ReferenceType>()->getPointeeType();
@@ -916,7 +915,13 @@ bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
DSAStack->checkMappableExprComponentListsForDecl(
D, /*CurrentRegionOnly=*/true,
[&](OMPClauseMappableExprCommon::MappableExprComponentListRef
- MapExprComponents) {
+ MapExprComponents,
+ OpenMPClauseKind WhereFoundClauseKind) {
+ // Only the map clause information influences how a variable is
+ // captured. E.g. is_device_ptr does not require changing the default
+ // behavior.
+ if (WhereFoundClauseKind != OMPC_map)
+ return false;
auto EI = MapExprComponents.rbegin();
auto EE = MapExprComponents.rend();
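
A minimal OpenMP sketch of the distinction drawn here, assuming OpenMP 4.5 target clauses (the function is illustrative): only map influences how the pointer is captured, while is_device_ptr leaves the default behavior unchanged.

  void target_demo(int *p, int n) {
    #pragma omp target map(tofrom: p[0:n])  // map affects capture of 'p'
    { p[0] = n; }
    #pragma omp target is_device_ptr(p)     // default capture is kept
    { p[0] = n; }
  }
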
@@ -1062,7 +1067,7 @@ void Sema::EndOpenMPDSABlock(Stmt *CurDirective) {
// clause requires an accessible, unambiguous default constructor for the
// class type, unless the list item is also specified in a firstprivate
// clause.
- if (auto D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
+ if (auto *D = dyn_cast_or_null<OMPExecutableDirective>(CurDirective)) {
for (auto *C : D->clauses()) {
if (auto *Clause = dyn_cast<OMPLastprivateClause>(C)) {
SmallVector<Expr *, 8> PrivateCopies;
@@ -1121,7 +1126,7 @@ public:
explicit VarDeclFilterCCC(Sema &S) : SemaRef(S) {}
bool ValidateCandidate(const TypoCorrection &Candidate) override {
NamedDecl *ND = Candidate.getCorrectionDecl();
- if (VarDecl *VD = dyn_cast_or_null<VarDecl>(ND)) {
+ if (auto *VD = dyn_cast_or_null<VarDecl>(ND)) {
return VD->hasGlobalStorage() &&
SemaRef.isDeclInScope(ND, SemaRef.getCurLexicalContext(),
SemaRef.getCurScope());
@@ -1290,7 +1295,7 @@ class LocalVarRefChecker : public ConstStmtVisitor<LocalVarRefChecker, bool> {
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
- if (auto VD = dyn_cast<VarDecl>(E->getDecl())) {
+ if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
if (VD->hasLocalStorage()) {
SemaRef.Diag(E->getLocStart(),
diag::err_omp_local_var_in_threadprivate_init)
@@ -1471,7 +1476,8 @@ public:
auto DVar = Stack->getTopDSA(VD, false);
// Check if the variable has explicit DSA set and stop analysis if it so.
- if (DVar.RefExpr) return;
+ if (DVar.RefExpr)
+ return;
auto ELoc = E->getExprLoc();
auto DKind = Stack->getCurrentDirective();
@@ -1550,7 +1556,8 @@ public:
!Stack->isLoopControlVariable(FD).first)
ImplicitFirstprivate.push_back(E);
}
- }
+ } else
+ Visit(E->getBase());
}
void VisitOMPExecutableDirective(OMPExecutableDirective *S) {
for (auto *C : S->clauses()) {
@@ -1587,7 +1594,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
- case OMPD_teams: {
+ case OMPD_teams:
+ case OMPD_target_teams: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
@@ -1616,7 +1624,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_target:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
- case OMPD_target_parallel_for_simd: {
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -1685,7 +1694,13 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
}
case OMPD_distribute_parallel_for_simd:
case OMPD_distribute_simd:
- case OMPD_distribute_parallel_for: {
+ case OMPD_distribute_parallel_for:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_parallel_for: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1);
QualType KmpInt32PtrTy =
Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
@@ -1740,7 +1755,8 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
}
WithInit = true;
}
- auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty);
+ auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
+ CaptureExpr->getLocStart());
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C, SourceRange()));
S.CurContext->addHiddenDecl(CED);
@@ -1868,1241 +1884,12 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
const DeclarationNameInfo &CurrentName,
OpenMPDirectiveKind CancelRegion,
SourceLocation StartLoc) {
- // Allowed nesting of constructs
- // +------------------+-----------------+------------------------------------+
- // | Parent directive | Child directive | Closely (!), No-Closely(+), Both(*)|
- // +------------------+-----------------+------------------------------------+
- // | parallel | parallel | * |
- // | parallel | for | * |
- // | parallel | for simd | * |
- // | parallel | master | * |
- // | parallel | critical | * |
- // | parallel | simd | * |
- // | parallel | sections | * |
- // | parallel | section | + |
- // | parallel | single | * |
- // | parallel | parallel for | * |
- // | parallel |parallel for simd| * |
- // | parallel |parallel sections| * |
- // | parallel | task | * |
- // | parallel | taskyield | * |
- // | parallel | barrier | * |
- // | parallel | taskwait | * |
- // | parallel | taskgroup | * |
- // | parallel | flush | * |
- // | parallel | ordered | + |
- // | parallel | atomic | * |
- // | parallel | target | * |
- // | parallel | target parallel | * |
- // | parallel | target parallel | * |
- // | | for | |
- // | parallel | target enter | * |
- // | | data | |
- // | parallel | target exit | * |
- // | | data | |
- // | parallel | teams | + |
- // | parallel | cancellation | |
- // | | point | ! |
- // | parallel | cancel | ! |
- // | parallel | taskloop | * |
- // | parallel | taskloop simd | * |
- // | parallel | distribute | + |
- // | parallel | distribute | + |
- // | | parallel for | |
- // | parallel | distribute | + |
- // | |parallel for simd| |
- // | parallel | distribute simd | + |
- // +------------------+-----------------+------------------------------------+
- // | for | parallel | * |
- // | for | for | + |
- // | for | for simd | + |
- // | for | master | + |
- // | for | critical | * |
- // | for | simd | * |
- // | for | sections | + |
- // | for | section | + |
- // | for | single | + |
- // | for | parallel for | * |
- // | for |parallel for simd| * |
- // | for |parallel sections| * |
- // | for | task | * |
- // | for | taskyield | * |
- // | for | barrier | + |
- // | for | taskwait | * |
- // | for | taskgroup | * |
- // | for | flush | * |
- // | for | ordered | * (if construct is ordered) |
- // | for | atomic | * |
- // | for | target | * |
- // | for | target parallel | * |
- // | for | target parallel | * |
- // | | for | |
- // | for | target enter | * |
- // | | data | |
- // | for | target exit | * |
- // | | data | |
- // | for | teams | + |
- // | for | cancellation | |
- // | | point | ! |
- // | for | cancel | ! |
- // | for | taskloop | * |
- // | for | taskloop simd | * |
- // | for | distribute | + |
- // | for | distribute | + |
- // | | parallel for | |
- // | for | distribute | + |
- // | |parallel for simd| |
- // | for | distribute simd | + |
- // | for | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | master | parallel | * |
- // | master | for | + |
- // | master | for simd | + |
- // | master | master | * |
- // | master | critical | * |
- // | master | simd | * |
- // | master | sections | + |
- // | master | section | + |
- // | master | single | + |
- // | master | parallel for | * |
- // | master |parallel for simd| * |
- // | master |parallel sections| * |
- // | master | task | * |
- // | master | taskyield | * |
- // | master | barrier | + |
- // | master | taskwait | * |
- // | master | taskgroup | * |
- // | master | flush | * |
- // | master | ordered | + |
- // | master | atomic | * |
- // | master | target | * |
- // | master | target parallel | * |
- // | master | target parallel | * |
- // | | for | |
- // | master | target enter | * |
- // | | data | |
- // | master | target exit | * |
- // | | data | |
- // | master | teams | + |
- // | master | cancellation | |
- // | | point | |
- // | master | cancel | |
- // | master | taskloop | * |
- // | master | taskloop simd | * |
- // | master | distribute | + |
- // | master | distribute | + |
- // | | parallel for | |
- // | master | distribute | + |
- // | |parallel for simd| |
- // | master | distribute simd | + |
- // | master | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | critical | parallel | * |
- // | critical | for | + |
- // | critical | for simd | + |
- // | critical | master | * |
- // | critical | critical | * (should have different names) |
- // | critical | simd | * |
- // | critical | sections | + |
- // | critical | section | + |
- // | critical | single | + |
- // | critical | parallel for | * |
- // | critical |parallel for simd| * |
- // | critical |parallel sections| * |
- // | critical | task | * |
- // | critical | taskyield | * |
- // | critical | barrier | + |
- // | critical | taskwait | * |
- // | critical | taskgroup | * |
- // | critical | ordered | + |
- // | critical | atomic | * |
- // | critical | target | * |
- // | critical | target parallel | * |
- // | critical | target parallel | * |
- // | | for | |
- // | critical | target enter | * |
- // | | data | |
- // | critical | target exit | * |
- // | | data | |
- // | critical | teams | + |
- // | critical | cancellation | |
- // | | point | |
- // | critical | cancel | |
- // | critical | taskloop | * |
- // | critical | taskloop simd | * |
- // | critical | distribute | + |
- // | critical | distribute | + |
- // | | parallel for | |
- // | critical | distribute | + |
- // | |parallel for simd| |
- // | critical | distribute simd | + |
- // | critical | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | simd | parallel | |
- // | simd | for | |
- // | simd | for simd | |
- // | simd | master | |
- // | simd | critical | |
- // | simd | simd | * |
- // | simd | sections | |
- // | simd | section | |
- // | simd | single | |
- // | simd | parallel for | |
- // | simd |parallel for simd| |
- // | simd |parallel sections| |
- // | simd | task | |
- // | simd | taskyield | |
- // | simd | barrier | |
- // | simd | taskwait | |
- // | simd | taskgroup | |
- // | simd | flush | |
- // | simd | ordered | + (with simd clause) |
- // | simd | atomic | |
- // | simd | target | |
- // | simd | target parallel | |
- // | simd | target parallel | |
- // | | for | |
- // | simd | target enter | |
- // | | data | |
- // | simd | target exit | |
- // | | data | |
- // | simd | teams | |
- // | simd | cancellation | |
- // | | point | |
- // | simd | cancel | |
- // | simd | taskloop | |
- // | simd | taskloop simd | |
- // | simd | distribute | |
- // | simd | distribute | |
- // | | parallel for | |
- // | simd | distribute | |
- // | |parallel for simd| |
- // | simd | distribute simd | |
- // | simd | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | for simd | parallel | |
- // | for simd | for | |
- // | for simd | for simd | |
- // | for simd | master | |
- // | for simd | critical | |
- // | for simd | simd | * |
- // | for simd | sections | |
- // | for simd | section | |
- // | for simd | single | |
- // | for simd | parallel for | |
- // | for simd |parallel for simd| |
- // | for simd |parallel sections| |
- // | for simd | task | |
- // | for simd | taskyield | |
- // | for simd | barrier | |
- // | for simd | taskwait | |
- // | for simd | taskgroup | |
- // | for simd | flush | |
- // | for simd | ordered | + (with simd clause) |
- // | for simd | atomic | |
- // | for simd | target | |
- // | for simd | target parallel | |
- // | for simd | target parallel | |
- // | | for | |
- // | for simd | target enter | |
- // | | data | |
- // | for simd | target exit | |
- // | | data | |
- // | for simd | teams | |
- // | for simd | cancellation | |
- // | | point | |
- // | for simd | cancel | |
- // | for simd | taskloop | |
- // | for simd | taskloop simd | |
- // | for simd | distribute | |
- // | for simd | distribute | |
- // | | parallel for | |
- // | for simd | distribute | |
- // | |parallel for simd| |
- // | for simd | distribute simd | |
- // | for simd | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | parallel for simd| parallel | |
- // | parallel for simd| for | |
- // | parallel for simd| for simd | |
- // | parallel for simd| master | |
- // | parallel for simd| critical | |
- // | parallel for simd| simd | * |
- // | parallel for simd| sections | |
- // | parallel for simd| section | |
- // | parallel for simd| single | |
- // | parallel for simd| parallel for | |
- // | parallel for simd|parallel for simd| |
- // | parallel for simd|parallel sections| |
- // | parallel for simd| task | |
- // | parallel for simd| taskyield | |
- // | parallel for simd| barrier | |
- // | parallel for simd| taskwait | |
- // | parallel for simd| taskgroup | |
- // | parallel for simd| flush | |
- // | parallel for simd| ordered | + (with simd clause) |
- // | parallel for simd| atomic | |
- // | parallel for simd| target | |
- // | parallel for simd| target parallel | |
- // | parallel for simd| target parallel | |
- // | | for | |
- // | parallel for simd| target enter | |
- // | | data | |
- // | parallel for simd| target exit | |
- // | | data | |
- // | parallel for simd| teams | |
- // | parallel for simd| cancellation | |
- // | | point | |
- // | parallel for simd| cancel | |
- // | parallel for simd| taskloop | |
- // | parallel for simd| taskloop simd | |
- // | parallel for simd| distribute | |
- // | parallel for simd| distribute | |
- // | | parallel for | |
- // | parallel for simd| distribute | |
- // | |parallel for simd| |
- // | parallel for simd| distribute simd | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | sections | parallel | * |
- // | sections | for | + |
- // | sections | for simd | + |
- // | sections | master | + |
- // | sections | critical | * |
- // | sections | simd | * |
- // | sections | sections | + |
- // | sections | section | * |
- // | sections | single | + |
- // | sections | parallel for | * |
- // | sections |parallel for simd| * |
- // | sections |parallel sections| * |
- // | sections | task | * |
- // | sections | taskyield | * |
- // | sections | barrier | + |
- // | sections | taskwait | * |
- // | sections | taskgroup | * |
- // | sections | flush | * |
- // | sections | ordered | + |
- // | sections | atomic | * |
- // | sections | target | * |
- // | sections | target parallel | * |
- // | sections | target parallel | * |
- // | | for | |
- // | sections | target enter | * |
- // | | data | |
- // | sections | target exit | * |
- // | | data | |
- // | sections | teams | + |
- // | sections | cancellation | |
- // | | point | ! |
- // | sections | cancel | ! |
- // | sections | taskloop | * |
- // | sections | taskloop simd | * |
- // | sections | distribute | + |
- // | sections | distribute | + |
- // | | parallel for | |
- // | sections | distribute | + |
- // | |parallel for simd| |
- // | sections | distribute simd | + |
- // | sections | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | section | parallel | * |
- // | section | for | + |
- // | section | for simd | + |
- // | section | master | + |
- // | section | critical | * |
- // | section | simd | * |
- // | section | sections | + |
- // | section | section | + |
- // | section | single | + |
- // | section | parallel for | * |
- // | section |parallel for simd| * |
- // | section |parallel sections| * |
- // | section | task | * |
- // | section | taskyield | * |
- // | section | barrier | + |
- // | section | taskwait | * |
- // | section | taskgroup | * |
- // | section | flush | * |
- // | section | ordered | + |
- // | section | atomic | * |
- // | section | target | * |
- // | section | target parallel | * |
- // | section | target parallel | * |
- // | | for | |
- // | section | target enter | * |
- // | | data | |
- // | section | target exit | * |
- // | | data | |
- // | section | teams | + |
- // | section | cancellation | |
- // | | point | ! |
- // | section | cancel | ! |
- // | section | taskloop | * |
- // | section | taskloop simd | * |
- // | section | distribute | + |
- // | section | distribute | + |
- // | | parallel for | |
- // | section | distribute | + |
- // | |parallel for simd| |
- // | section | distribute simd | + |
- // | section | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | single | parallel | * |
- // | single | for | + |
- // | single | for simd | + |
- // | single | master | + |
- // | single | critical | * |
- // | single | simd | * |
- // | single | sections | + |
- // | single | section | + |
- // | single | single | + |
- // | single | parallel for | * |
- // | single |parallel for simd| * |
- // | single |parallel sections| * |
- // | single | task | * |
- // | single | taskyield | * |
- // | single | barrier | + |
- // | single | taskwait | * |
- // | single | taskgroup | * |
- // | single | flush | * |
- // | single | ordered | + |
- // | single | atomic | * |
- // | single | target | * |
- // | single | target parallel | * |
- // | single | target parallel | * |
- // | | for | |
- // | single | target enter | * |
- // | | data | |
- // | single | target exit | * |
- // | | data | |
- // | single | teams | + |
- // | single | cancellation | |
- // | | point | |
- // | single | cancel | |
- // | single | taskloop | * |
- // | single | taskloop simd | * |
- // | single | distribute | + |
- // | single | distribute | + |
- // | | parallel for | |
- // | single | distribute | + |
- // | |parallel for simd| |
- // | single | distribute simd | + |
- // | single | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | parallel for | parallel | * |
- // | parallel for | for | + |
- // | parallel for | for simd | + |
- // | parallel for | master | + |
- // | parallel for | critical | * |
- // | parallel for | simd | * |
- // | parallel for | sections | + |
- // | parallel for | section | + |
- // | parallel for | single | + |
- // | parallel for | parallel for | * |
- // | parallel for |parallel for simd| * |
- // | parallel for |parallel sections| * |
- // | parallel for | task | * |
- // | parallel for | taskyield | * |
- // | parallel for | barrier | + |
- // | parallel for | taskwait | * |
- // | parallel for | taskgroup | * |
- // | parallel for | flush | * |
- // | parallel for | ordered | * (if construct is ordered) |
- // | parallel for | atomic | * |
- // | parallel for | target | * |
- // | parallel for | target parallel | * |
- // | parallel for | target parallel | * |
- // | | for | |
- // | parallel for | target enter | * |
- // | | data | |
- // | parallel for | target exit | * |
- // | | data | |
- // | parallel for | teams | + |
- // | parallel for | cancellation | |
- // | | point | ! |
- // | parallel for | cancel | ! |
- // | parallel for | taskloop | * |
- // | parallel for | taskloop simd | * |
- // | parallel for | distribute | + |
- // | parallel for | distribute | + |
- // | | parallel for | |
- // | parallel for | distribute | + |
- // | |parallel for simd| |
- // | parallel for | distribute simd | + |
- // | parallel for | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | parallel sections| parallel | * |
- // | parallel sections| for | + |
- // | parallel sections| for simd | + |
- // | parallel sections| master | + |
- // | parallel sections| critical | + |
- // | parallel sections| simd | * |
- // | parallel sections| sections | + |
- // | parallel sections| section | * |
- // | parallel sections| single | + |
- // | parallel sections| parallel for | * |
- // | parallel sections|parallel for simd| * |
- // | parallel sections|parallel sections| * |
- // | parallel sections| task | * |
- // | parallel sections| taskyield | * |
- // | parallel sections| barrier | + |
- // | parallel sections| taskwait | * |
- // | parallel sections| taskgroup | * |
- // | parallel sections| flush | * |
- // | parallel sections| ordered | + |
- // | parallel sections| atomic | * |
- // | parallel sections| target | * |
- // | parallel sections| target parallel | * |
- // | parallel sections| target parallel | * |
- // | | for | |
- // | parallel sections| target enter | * |
- // | | data | |
- // | parallel sections| target exit | * |
- // | | data | |
- // | parallel sections| teams | + |
- // | parallel sections| cancellation | |
- // | | point | ! |
- // | parallel sections| cancel | ! |
- // | parallel sections| taskloop | * |
- // | parallel sections| taskloop simd | * |
- // | parallel sections| distribute | + |
- // | parallel sections| distribute | + |
- // | | parallel for | |
- // | parallel sections| distribute | + |
- // | |parallel for simd| |
- // | parallel sections| distribute simd | + |
- // | parallel sections| target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | task | parallel | * |
- // | task | for | + |
- // | task | for simd | + |
- // | task | master | + |
- // | task | critical | * |
- // | task | simd | * |
- // | task | sections | + |
- // | task | section | + |
- // | task | single | + |
- // | task | parallel for | * |
- // | task |parallel for simd| * |
- // | task |parallel sections| * |
- // | task | task | * |
- // | task | taskyield | * |
- // | task | barrier | + |
- // | task | taskwait | * |
- // | task | taskgroup | * |
- // | task | flush | * |
- // | task | ordered | + |
- // | task | atomic | * |
- // | task | target | * |
- // | task | target parallel | * |
- // | task | target parallel | * |
- // | | for | |
- // | task | target enter | * |
- // | | data | |
- // | task | target exit | * |
- // | | data | |
- // | task | teams | + |
- // | task | cancellation | |
- // | | point | ! |
- // | task | cancel | ! |
- // | task | taskloop | * |
- // | task | taskloop simd | * |
- // | task | distribute | + |
- // | task | distribute | + |
- // | | parallel for | |
- // | task | distribute | + |
- // | |parallel for simd| |
- // | task | distribute simd | + |
- // | task | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | ordered | parallel | * |
- // | ordered | for | + |
- // | ordered | for simd | + |
- // | ordered | master | * |
- // | ordered | critical | * |
- // | ordered | simd | * |
- // | ordered | sections | + |
- // | ordered | section | + |
- // | ordered | single | + |
- // | ordered | parallel for | * |
- // | ordered |parallel for simd| * |
- // | ordered |parallel sections| * |
- // | ordered | task | * |
- // | ordered | taskyield | * |
- // | ordered | barrier | + |
- // | ordered | taskwait | * |
- // | ordered | taskgroup | * |
- // | ordered | flush | * |
- // | ordered | ordered | + |
- // | ordered | atomic | * |
- // | ordered | target | * |
- // | ordered | target parallel | * |
- // | ordered | target parallel | * |
- // | | for | |
- // | ordered | target enter | * |
- // | | data | |
- // | ordered | target exit | * |
- // | | data | |
- // | ordered | teams | + |
- // | ordered | cancellation | |
- // | | point | |
- // | ordered | cancel | |
- // | ordered | taskloop | * |
- // | ordered | taskloop simd | * |
- // | ordered | distribute | + |
- // | ordered | distribute | + |
- // | | parallel for | |
- // | ordered | distribute | + |
- // | |parallel for simd| |
- // | ordered | distribute simd | + |
- // | ordered | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | atomic | parallel | |
- // | atomic | for | |
- // | atomic | for simd | |
- // | atomic | master | |
- // | atomic | critical | |
- // | atomic | simd | |
- // | atomic | sections | |
- // | atomic | section | |
- // | atomic | single | |
- // | atomic | parallel for | |
- // | atomic |parallel for simd| |
- // | atomic |parallel sections| |
- // | atomic | task | |
- // | atomic | taskyield | |
- // | atomic | barrier | |
- // | atomic | taskwait | |
- // | atomic | taskgroup | |
- // | atomic | flush | |
- // | atomic | ordered | |
- // | atomic | atomic | |
- // | atomic | target | |
- // | atomic | target parallel | |
- // | atomic | target parallel | |
- // | | for | |
- // | atomic | target enter | |
- // | | data | |
- // | atomic | target exit | |
- // | | data | |
- // | atomic | teams | |
- // | atomic | cancellation | |
- // | | point | |
- // | atomic | cancel | |
- // | atomic | taskloop | |
- // | atomic | taskloop simd | |
- // | atomic | distribute | |
- // | atomic | distribute | |
- // | | parallel for | |
- // | atomic | distribute | |
- // | |parallel for simd| |
- // | atomic | distribute simd | |
- // | atomic | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target | parallel | * |
- // | target | for | * |
- // | target | for simd | * |
- // | target | master | * |
- // | target | critical | * |
- // | target | simd | * |
- // | target | sections | * |
- // | target | section | * |
- // | target | single | * |
- // | target | parallel for | * |
- // | target |parallel for simd| * |
- // | target |parallel sections| * |
- // | target | task | * |
- // | target | taskyield | * |
- // | target | barrier | * |
- // | target | taskwait | * |
- // | target | taskgroup | * |
- // | target | flush | * |
- // | target | ordered | * |
- // | target | atomic | * |
- // | target | target | |
- // | target | target parallel | |
- // | target | target parallel | |
- // | | for | |
- // | target | target enter | |
- // | | data | |
- // | target | target exit | |
- // | | data | |
- // | target | teams | * |
- // | target | cancellation | |
- // | | point | |
- // | target | cancel | |
- // | target | taskloop | * |
- // | target | taskloop simd | * |
- // | target | distribute | + |
- // | target | distribute | + |
- // | | parallel for | |
- // | target | distribute | + |
- // | |parallel for simd| |
- // | target | distribute simd | + |
- // | target | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target parallel | parallel | * |
- // | target parallel | for | * |
- // | target parallel | for simd | * |
- // | target parallel | master | * |
- // | target parallel | critical | * |
- // | target parallel | simd | * |
- // | target parallel | sections | * |
- // | target parallel | section | * |
- // | target parallel | single | * |
- // | target parallel | parallel for | * |
- // | target parallel |parallel for simd| * |
- // | target parallel |parallel sections| * |
- // | target parallel | task | * |
- // | target parallel | taskyield | * |
- // | target parallel | barrier | * |
- // | target parallel | taskwait | * |
- // | target parallel | taskgroup | * |
- // | target parallel | flush | * |
- // | target parallel | ordered | * |
- // | target parallel | atomic | * |
- // | target parallel | target | |
- // | target parallel | target parallel | |
- // | target parallel | target parallel | |
- // | | for | |
- // | target parallel | target enter | |
- // | | data | |
- // | target parallel | target exit | |
- // | | data | |
- // | target parallel | teams | |
- // | target parallel | cancellation | |
- // | | point | ! |
- // | target parallel | cancel | ! |
- // | target parallel | taskloop | * |
- // | target parallel | taskloop simd | * |
- // | target parallel | distribute | |
- // | target parallel | distribute | |
- // | | parallel for | |
- // | target parallel | distribute | |
- // | |parallel for simd| |
- // | target parallel | distribute simd | |
- // | target parallel | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target parallel | parallel | * |
- // | for | | |
- // | target parallel | for | * |
- // | for | | |
- // | target parallel | for simd | * |
- // | for | | |
- // | target parallel | master | * |
- // | for | | |
- // | target parallel | critical | * |
- // | for | | |
- // | target parallel | simd | * |
- // | for | | |
- // | target parallel | sections | * |
- // | for | | |
- // | target parallel | section | * |
- // | for | | |
- // | target parallel | single | * |
- // | for | | |
- // | target parallel | parallel for | * |
- // | for | | |
- // | target parallel |parallel for simd| * |
- // | for | | |
- // | target parallel |parallel sections| * |
- // | for | | |
- // | target parallel | task | * |
- // | for | | |
- // | target parallel | taskyield | * |
- // | for | | |
- // | target parallel | barrier | * |
- // | for | | |
- // | target parallel | taskwait | * |
- // | for | | |
- // | target parallel | taskgroup | * |
- // | for | | |
- // | target parallel | flush | * |
- // | for | | |
- // | target parallel | ordered | * |
- // | for | | |
- // | target parallel | atomic | * |
- // | for | | |
- // | target parallel | target | |
- // | for | | |
- // | target parallel | target parallel | |
- // | for | | |
- // | target parallel | target parallel | |
- // | for | for | |
- // | target parallel | target enter | |
- // | for | data | |
- // | target parallel | target exit | |
- // | for | data | |
- // | target parallel | teams | |
- // | for | | |
- // | target parallel | cancellation | |
- // | for | point | ! |
- // | target parallel | cancel | ! |
- // | for | | |
- // | target parallel | taskloop | * |
- // | for | | |
- // | target parallel | taskloop simd | * |
- // | for | | |
- // | target parallel | distribute | |
- // | for | | |
- // | target parallel | distribute | |
- // | for | parallel for | |
- // | target parallel | distribute | |
- // | for |parallel for simd| |
- // | target parallel | distribute simd | |
- // | for | | |
- // | target parallel | target parallel | |
- // | for | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | teams | parallel | * |
- // | teams | for | + |
- // | teams | for simd | + |
- // | teams | master | + |
- // | teams | critical | + |
- // | teams | simd | + |
- // | teams | sections | + |
- // | teams | section | + |
- // | teams | single | + |
- // | teams | parallel for | * |
- // | teams |parallel for simd| * |
- // | teams |parallel sections| * |
- // | teams | task | + |
- // | teams | taskyield | + |
- // | teams | barrier | + |
- // | teams | taskwait | + |
- // | teams | taskgroup | + |
- // | teams | flush | + |
- // | teams | ordered | + |
- // | teams | atomic | + |
- // | teams | target | + |
- // | teams | target parallel | + |
- // | teams | target parallel | + |
- // | | for | |
- // | teams | target enter | + |
- // | | data | |
- // | teams | target exit | + |
- // | | data | |
- // | teams | teams | + |
- // | teams | cancellation | |
- // | | point | |
- // | teams | cancel | |
- // | teams | taskloop | + |
- // | teams | taskloop simd | + |
- // | teams | distribute | ! |
- // | teams | distribute | ! |
- // | | parallel for | |
- // | teams | distribute | ! |
- // | |parallel for simd| |
- // | teams | distribute simd | ! |
- // | teams | target parallel | + |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | taskloop | parallel | * |
- // | taskloop | for | + |
- // | taskloop | for simd | + |
- // | taskloop | master | + |
- // | taskloop | critical | * |
- // | taskloop | simd | * |
- // | taskloop | sections | + |
- // | taskloop | section | + |
- // | taskloop | single | + |
- // | taskloop | parallel for | * |
- // | taskloop |parallel for simd| * |
- // | taskloop |parallel sections| * |
- // | taskloop | task | * |
- // | taskloop | taskyield | * |
- // | taskloop | barrier | + |
- // | taskloop | taskwait | * |
- // | taskloop | taskgroup | * |
- // | taskloop | flush | * |
- // | taskloop | ordered | + |
- // | taskloop | atomic | * |
- // | taskloop | target | * |
- // | taskloop | target parallel | * |
- // | taskloop | target parallel | * |
- // | | for | |
- // | taskloop | target enter | * |
- // | | data | |
- // | taskloop | target exit | * |
- // | | data | |
- // | taskloop | teams | + |
- // | taskloop | cancellation | |
- // | | point | |
- // | taskloop | cancel | |
- // | taskloop | taskloop | * |
- // | taskloop | distribute | + |
- // | taskloop | distribute | + |
- // | | parallel for | |
- // | taskloop | distribute | + |
- // | |parallel for simd| |
- // | taskloop | distribute simd | + |
- // | taskloop | target parallel | * |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | taskloop simd | parallel | |
- // | taskloop simd | for | |
- // | taskloop simd | for simd | |
- // | taskloop simd | master | |
- // | taskloop simd | critical | |
- // | taskloop simd | simd | * |
- // | taskloop simd | sections | |
- // | taskloop simd | section | |
- // | taskloop simd | single | |
- // | taskloop simd | parallel for | |
- // | taskloop simd |parallel for simd| |
- // | taskloop simd |parallel sections| |
- // | taskloop simd | task | |
- // | taskloop simd | taskyield | |
- // | taskloop simd | barrier | |
- // | taskloop simd | taskwait | |
- // | taskloop simd | taskgroup | |
- // | taskloop simd | flush | |
- // | taskloop simd | ordered | + (with simd clause) |
- // | taskloop simd | atomic | |
- // | taskloop simd | target | |
- // | taskloop simd | target parallel | |
- // | taskloop simd | target parallel | |
- // | | for | |
- // | taskloop simd | target enter | |
- // | | data | |
- // | taskloop simd | target exit | |
- // | | data | |
- // | taskloop simd | teams | |
- // | taskloop simd | cancellation | |
- // | | point | |
- // | taskloop simd | cancel | |
- // | taskloop simd | taskloop | |
- // | taskloop simd | taskloop simd | |
- // | taskloop simd | distribute | |
- // | taskloop simd | distribute | |
- // | | parallel for | |
- // | taskloop simd | distribute | |
- // | |parallel for simd| |
- // | taskloop simd | distribute simd | |
- // | taskloop simd | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute | parallel | * |
- // | distribute | for | * |
- // | distribute | for simd | * |
- // | distribute | master | * |
- // | distribute | critical | * |
- // | distribute | simd | * |
- // | distribute | sections | * |
- // | distribute | section | * |
- // | distribute | single | * |
- // | distribute | parallel for | * |
- // | distribute |parallel for simd| * |
- // | distribute |parallel sections| * |
- // | distribute | task | * |
- // | distribute | taskyield | * |
- // | distribute | barrier | * |
- // | distribute | taskwait | * |
- // | distribute | taskgroup | * |
- // | distribute | flush | * |
- // | distribute | ordered | + |
- // | distribute | atomic | * |
- // | distribute | target | |
- // | distribute | target parallel | |
- // | distribute | target parallel | |
- // | | for | |
- // | distribute | target enter | |
- // | | data | |
- // | distribute | target exit | |
- // | | data | |
- // | distribute | teams | |
- // | distribute | cancellation | + |
- // | | point | |
- // | distribute | cancel | + |
- // | distribute | taskloop | * |
- // | distribute | taskloop simd | * |
- // | distribute | distribute | |
- // | distribute | distribute | |
- // | | parallel for | |
- // | distribute | distribute | |
- // | |parallel for simd| |
- // | distribute | distribute simd | |
- // | distribute | target parallel | |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute | parallel | * |
- // | parallel for | | |
- // | distribute | for | * |
- // | parallel for | | |
- // | distribute | for simd | * |
- // | parallel for | | |
- // | distribute | master | * |
- // | parallel for | | |
- // | distribute | critical | * |
- // | parallel for | | |
- // | distribute | simd | * |
- // | parallel for | | |
- // | distribute | sections | * |
- // | parallel for | | |
- // | distribute | section | * |
- // | parallel for | | |
- // | distribute | single | * |
- // | parallel for | | |
- // | distribute | parallel for | * |
- // | parallel for | | |
- // | distribute |parallel for simd| * |
- // | parallel for | | |
- // | distribute |parallel sections| * |
- // | parallel for | | |
- // | distribute | task | * |
- // | parallel for | | |
- // | distribute | taskyield | * |
- // | parallel for | | |
- // | distribute | barrier | * |
- // | parallel for | | |
- // | distribute | taskwait | * |
- // | parallel for | | |
- // | distribute | taskgroup | * |
- // | parallel for | | |
- // | distribute | flush | * |
- // | parallel for | | |
- // | distribute | ordered | + |
- // | parallel for | | |
- // | distribute | atomic | * |
- // | parallel for | | |
- // | distribute | target | |
- // | parallel for | | |
- // | distribute | target parallel | |
- // | parallel for | | |
- // | distribute | target parallel | |
- // | parallel for | for | |
- // | distribute | target enter | |
- // | parallel for | data | |
- // | distribute | target exit | |
- // | parallel for | data | |
- // | distribute | teams | |
- // | parallel for | | |
- // | distribute | cancellation | + |
- // | parallel for | point | |
- // | distribute | cancel | + |
- // | parallel for | | |
- // | distribute | taskloop | * |
- // | parallel for | | |
- // | distribute | taskloop simd | * |
- // | parallel for | | |
- // | distribute | distribute | |
- // | parallel for | | |
- // | distribute | distribute | |
- // | parallel for | parallel for | |
- // | distribute | distribute | |
- // | parallel for |parallel for simd| |
- // | distribute | distribute simd | |
- // | parallel for | | |
- // | distribute | target parallel | |
- // | parallel for | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute | parallel | * |
- // | parallel for simd| | |
- // | distribute | for | * |
- // | parallel for simd| | |
- // | distribute | for simd | * |
- // | parallel for simd| | |
- // | distribute | master | * |
- // | parallel for simd| | |
- // | distribute | critical | * |
- // | parallel for simd| | |
- // | distribute | simd | * |
- // | parallel for simd| | |
- // | distribute | sections | * |
- // | parallel for simd| | |
- // | distribute | section | * |
- // | parallel for simd| | |
- // | distribute | single | * |
- // | parallel for simd| | |
- // | distribute | parallel for | * |
- // | parallel for simd| | |
- // | distribute |parallel for simd| * |
- // | parallel for simd| | |
- // | distribute |parallel sections| * |
- // | parallel for simd| | |
- // | distribute | task | * |
- // | parallel for simd| | |
- // | distribute | taskyield | * |
- // | parallel for simd| | |
- // | distribute | barrier | * |
- // | parallel for simd| | |
- // | distribute | taskwait | * |
- // | parallel for simd| | |
- // | distribute | taskgroup | * |
- // | parallel for simd| | |
- // | distribute | flush | * |
- // | parallel for simd| | |
- // | distribute | ordered | + |
- // | parallel for simd| | |
- // | distribute | atomic | * |
- // | parallel for simd| | |
- // | distribute | target | |
- // | parallel for simd| | |
- // | distribute | target parallel | |
- // | parallel for simd| | |
- // | distribute | target parallel | |
- // | parallel for simd| for | |
- // | distribute | target enter | |
- // | parallel for simd| data | |
- // | distribute | target exit | |
- // | parallel for simd| data | |
- // | distribute | teams | |
- // | parallel for simd| | |
- // | distribute | cancellation | + |
- // | parallel for simd| point | |
- // | distribute | cancel | + |
- // | parallel for simd| | |
- // | distribute | taskloop | * |
- // | parallel for simd| | |
- // | distribute | taskloop simd | * |
- // | parallel for simd| | |
- // | distribute | distribute | |
- // | parallel for simd| | |
- // | distribute | distribute | * |
- // | parallel for simd| parallel for | |
- // | distribute | distribute | * |
- // | parallel for simd|parallel for simd| |
- // | distribute | distribute simd | * |
- // | parallel for simd| | |
- // | distribute | target parallel | |
- // | parallel for simd| for simd | |
- // +------------------+-----------------+------------------------------------+
- // | distribute simd | parallel | * |
- // | distribute simd | for | * |
- // | distribute simd | for simd | * |
- // | distribute simd | master | * |
- // | distribute simd | critical | * |
- // | distribute simd | simd | * |
- // | distribute simd | sections | * |
- // | distribute simd | section | * |
- // | distribute simd | single | * |
- // | distribute simd | parallel for | * |
- // | distribute simd |parallel for simd| * |
- // | distribute simd |parallel sections| * |
- // | distribute simd | task | * |
- // | distribute simd | taskyield | * |
- // | distribute simd | barrier | * |
- // | distribute simd | taskwait | * |
- // | distribute simd | taskgroup | * |
- // | distribute simd | flush | * |
- // | distribute simd | ordered | + |
- // | distribute simd | atomic | * |
- // | distribute simd | target | * |
- // | distribute simd | target parallel | * |
- // | distribute simd | target parallel | * |
- // | | for | |
- // | distribute simd | target enter | * |
- // | | data | |
- // | distribute simd | target exit | * |
- // | | data | |
- // | distribute simd | teams | * |
- // | distribute simd | cancellation | + |
- // | | point | |
- // | distribute simd | cancel | + |
- // | distribute simd | taskloop | * |
- // | distribute simd | taskloop simd | * |
- // | distribute simd | distribute | |
- // | distribute simd | distribute | * |
- // | | parallel for | |
- // | distribute simd | distribute | * |
- // | |parallel for simd| |
- // | distribute simd | distribute simd | * |
- // | distribute simd | target parallel | * |
- // | | for simd | |
- // +------------------+-----------------+------------------------------------+
- // | target parallel | parallel | * |
- // | for simd | | |
- // | target parallel | for | * |
- // | for simd | | |
- // | target parallel | for simd | * |
- // | for simd | | |
- // | target parallel | master | * |
- // | for simd | | |
- // | target parallel | critical | * |
- // | for simd | | |
- // | target parallel | simd | ! |
- // | for simd | | |
- // | target parallel | sections | * |
- // | for simd | | |
- // | target parallel | section | * |
- // | for simd | | |
- // | target parallel | single | * |
- // | for simd | | |
- // | target parallel | parallel for | * |
- // | for simd | | |
- // | target parallel |parallel for simd| * |
- // | for simd | | |
- // | target parallel |parallel sections| * |
- // | for simd | | |
- // | target parallel | task | * |
- // | for simd | | |
- // | target parallel | taskyield | * |
- // | for simd | | |
- // | target parallel | barrier | * |
- // | for simd | | |
- // | target parallel | taskwait | * |
- // | for simd | | |
- // | target parallel | taskgroup | * |
- // | for simd | | |
- // | target parallel | flush | * |
- // | for simd | | |
- // | target parallel | ordered | + (with simd clause) |
- // | for simd | | |
- // | target parallel | atomic | * |
- // | for simd | | |
- // | target parallel | target | * |
- // | for simd | | |
- // | target parallel | target parallel | * |
- // | for simd | | |
- // | target parallel | target parallel | * |
- // | for simd | for | |
- // | target parallel | target enter | * |
- // | for simd | data | |
- // | target parallel | target exit | * |
- // | for simd | data | |
- // | target parallel | teams | * |
- // | for simd | | |
- // | target parallel | cancellation | * |
- // | for simd | point | |
- // | target parallel | cancel | * |
- // | for simd | | |
- // | target parallel | taskloop | * |
- // | for simd | | |
- // | target parallel | taskloop simd | * |
- // | for simd | | |
- // | target parallel | distribute | * |
- // | for simd | | |
- // | target parallel | distribute | * |
- // | for simd | parallel for | |
- // | target parallel | distribute | * |
- // | for simd |parallel for simd| |
- // | target parallel | distribute simd | * |
- // | for simd | | |
- // | target parallel | target parallel | * |
- // | for simd | for simd | |
- // +------------------+-----------------+------------------------------------+
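  // Editorial illustration (not part of this patch) of two of the nesting
  // rules the code below enforces: a worksharing region may not be closely
  // nested inside another worksharing region, while a new parallel region
  // may be started almost anywhere.
  //   #pragma omp single
  //   {
  //   #pragma omp for        // error: worksharing closely nested in 'single'
  //     for (int i = 0; i < n; ++i)
  //       a[i] = 0.0f;
  //   #pragma omp parallel   // OK: 'parallel' may be nested in 'single'
  //     { /* ... */ }
  //   }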
if (Stack->getCurScope()) {
auto ParentRegion = Stack->getParentDirective();
auto OffendingRegion = ParentRegion;
bool NestingProhibited = false;
bool CloseNesting = true;
+ bool OrphanSeen = false;
enum {
NoRecommend,
ShouldBeInParallelRegion,
@@ -3116,7 +1903,7 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// OpenMP [2.8.1,simd Construct, Restrictions]
// An ordered construct with the simd clause is the only OpenMP
// construct that can appear in the simd region.
- // Allowing a SIMD consruct nested in another SIMD construct is an
+ // Allowing a SIMD construct nested in another SIMD construct is an
// extension. The OpenMP 4.5 spec does not allow it. Issue a warning
// message.
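  // Editorial illustration (not from the patch):
  //   #pragma omp simd
  //   for (int i = 0; i < n; ++i) {
  //   #pragma omp ordered simd   // OK: the one construct allowed in simd
  //     a[i] += 1.0f;
  //   }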
SemaRef.Diag(StartLoc, (CurrentRegion != OMPD_simd)
@@ -3144,9 +1931,11 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
}
return false;
}
- // Allow some constructs to be orphaned (they could be used in functions,
- // called from OpenMP regions with the required preconditions).
- if (ParentRegion == OMPD_unknown)
+ // Allow some constructs (except teams) to be orphaned (they could be
+ // used in functions, called from OpenMP regions with the required
+ // preconditions).
+ if (ParentRegion == OMPD_unknown &&
+ !isOpenMPNestingTeamsDirective(CurrentRegion))
return false;
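  // Editorial sketch (assumed example): an orphaned worksharing construct is
  // accepted because a caller may invoke the function from a parallel
  // region, whereas an orphaned 'teams' can never end up strictly nested in
  // 'target' and is diagnosed below.
  //   void work(int n, float *a) {
  //   #pragma omp for          // OK: binds to the caller's parallel region
  //     for (int i = 0; i < n; ++i)
  //       a[i] *= 2.0f;
  //   }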
if (CurrentRegion == OMPD_cancellation_point ||
CurrentRegion == OMPD_cancel) {
@@ -3184,20 +1973,17 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
// critical region with the same name. Note that this restriction is not
// sufficient to prevent deadlock.
SourceLocation PreviousCriticalLoc;
- bool DeadLock =
- Stack->hasDirective([CurrentName, &PreviousCriticalLoc](
- OpenMPDirectiveKind K,
- const DeclarationNameInfo &DNI,
- SourceLocation Loc)
- ->bool {
- if (K == OMPD_critical &&
- DNI.getName() == CurrentName.getName()) {
- PreviousCriticalLoc = Loc;
- return true;
- } else
- return false;
- },
- false /* skip top directive */);
+ bool DeadLock = Stack->hasDirective(
+ [CurrentName, &PreviousCriticalLoc](OpenMPDirectiveKind K,
+ const DeclarationNameInfo &DNI,
+ SourceLocation Loc) -> bool {
+ if (K == OMPD_critical && DNI.getName() == CurrentName.getName()) {
+ PreviousCriticalLoc = Loc;
+ return true;
+ } else
+ return false;
+ },
+ false /* skip top directive */);
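  // Editorial illustration (not from the patch) of the self-deadlock this
  // lookup detects:
  //   #pragma omp critical(lock)
  //   {
  //   #pragma omp critical(lock)   // error: same name as enclosing region
  //     { /* never reached */ }
  //   }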
if (DeadLock) {
SemaRef.Diag(StartLoc,
diag::err_omp_prohibited_region_critical_same_name)
@@ -3217,7 +2003,8 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
ParentRegion == OMPD_critical ||
ParentRegion == OMPD_ordered;
} else if (isOpenMPWorksharingDirective(CurrentRegion) &&
- !isOpenMPParallelDirective(CurrentRegion)) {
+ !isOpenMPParallelDirective(CurrentRegion) &&
+ !isOpenMPTeamsDirective(CurrentRegion)) {
// OpenMP [2.16, Nesting of Regions]
// A worksharing region may not be closely nested inside a worksharing,
// explicit task, critical, ordered, atomic, or master region.
@@ -3241,15 +2028,19 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
!(isOpenMPSimdDirective(ParentRegion) ||
Stack->isParentOrderedRegion());
Recommend = ShouldBeInOrderedRegion;
- } else if (isOpenMPTeamsDirective(CurrentRegion)) {
+ } else if (isOpenMPNestingTeamsDirective(CurrentRegion)) {
// OpenMP [2.16, Nesting of Regions]
// If specified, a teams construct must be contained within a target
// construct.
NestingProhibited = ParentRegion != OMPD_target;
+ OrphanSeen = ParentRegion == OMPD_unknown;
Recommend = ShouldBeInTargetRegion;
Stack->setParentTeamsRegionLoc(Stack->getConstructLoc());
}
- if (!NestingProhibited && isOpenMPTeamsDirective(ParentRegion)) {
+ if (!NestingProhibited &&
+ !isOpenMPTargetExecutionDirective(CurrentRegion) &&
+ !isOpenMPTargetDataManagementDirective(CurrentRegion) &&
+ (ParentRegion == OMPD_teams || ParentRegion == OMPD_target_teams)) {
// OpenMP [2.16, Nesting of Regions]
// distribute, parallel, parallel sections, parallel workshare, and the
// parallel loop and parallel loop SIMD constructs are the only OpenMP
@@ -3258,11 +2049,13 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
!isOpenMPDistributeDirective(CurrentRegion);
Recommend = ShouldBeInParallelRegion;
}
- if (!NestingProhibited && isOpenMPDistributeDirective(CurrentRegion)) {
+ if (!NestingProhibited &&
+ isOpenMPNestingDistributeDirective(CurrentRegion)) {
// OpenMP 4.5 [2.17 Nesting of Regions]
// The region associated with the distribute construct must be strictly
// nested inside a teams region
- NestingProhibited = !isOpenMPTeamsDirective(ParentRegion);
+ NestingProhibited =
+ (ParentRegion != OMPD_teams && ParentRegion != OMPD_target_teams);
Recommend = ShouldBeInTeamsRegion;
}
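  // Editorial sketch of the accepted shape (assumed example): 'distribute'
  // strictly nested in a teams region that is itself nested in 'target'.
  //   #pragma omp target
  //   #pragma omp teams
  //   #pragma omp distribute
  //   for (int i = 0; i < n; ++i)
  //     a[i] = 0.0f;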
if (!NestingProhibited &&
@@ -3285,9 +2078,14 @@ static bool CheckNestingOfRegions(Sema &SemaRef, DSAStackTy *Stack,
CloseNesting = false;
}
if (NestingProhibited) {
- SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
- << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
- << Recommend << getOpenMPDirectiveName(CurrentRegion);
+ if (OrphanSeen) {
+ SemaRef.Diag(StartLoc, diag::err_omp_orphaned_device_directive)
+ << getOpenMPDirectiveName(CurrentRegion) << Recommend;
+ } else {
+ SemaRef.Diag(StartLoc, diag::err_omp_prohibited_region)
+ << CloseNesting << getOpenMPDirectiveName(OffendingRegion)
+ << Recommend << getOpenMPDirectiveName(CurrentRegion);
+ }
return true;
}
}
@@ -3602,6 +2400,45 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
AllowedNameModifiers.push_back(OMPD_target);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
+ case OMPD_target_simd:
+ Res = ActOnOpenMPTargetSimdDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_teams_distribute:
+ Res = ActOnOpenMPTeamsDistributeDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_teams_distribute_simd:
+ Res = ActOnOpenMPTeamsDistributeSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_teams_distribute_parallel_for_simd:
+ Res = ActOnOpenMPTeamsDistributeParallelForSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_teams_distribute_parallel_for:
+ Res = ActOnOpenMPTeamsDistributeParallelForDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
+ case OMPD_target_teams:
+ Res = ActOnOpenMPTargetTeamsDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_target_teams_distribute:
+ Res = ActOnOpenMPTargetTeamsDistributeDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ break;
+ case OMPD_target_teams_distribute_parallel_for:
+ Res = ActOnOpenMPTargetTeamsDistributeParallelForDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_target);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
@@ -3969,7 +2806,7 @@ public:
/// \brief Build reference expression to the private counter be used for
/// codegen.
Expr *BuildPrivateCounterVar() const;
- /// \brief Build initization of the counter be used for codegen.
+ /// \brief Build initialization of the counter to be used for codegen.
Expr *BuildCounterInit() const;
/// \brief Build step of the counter be used for codegen.
Expr *BuildCounterStep() const;
@@ -4094,8 +2931,9 @@ bool OpenMPIterationSpaceChecker::SetStep(Expr *NewStep, bool Subtract) {
return true;
}
if (TestIsLessOp == Subtract) {
- NewStep = SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus,
- NewStep).get();
+ NewStep =
+ SemaRef.CreateBuiltinUnaryOp(NewStep->getExprLoc(), UO_Minus, NewStep)
+ .get();
Subtract = !Subtract;
}
}
@@ -4127,7 +2965,7 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
InitSrcRange = S->getSourceRange();
if (Expr *E = dyn_cast<Expr>(S))
S = E->IgnoreParens();
- if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->getOpcode() == BO_Assign) {
auto *LHS = BO->getLHS()->IgnoreParens();
if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
@@ -4142,9 +2980,9 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
return SetLCDeclAndLB(ME->getMemberDecl(), ME, BO->getRHS());
}
}
- } else if (auto DS = dyn_cast<DeclStmt>(S)) {
+ } else if (auto *DS = dyn_cast<DeclStmt>(S)) {
if (DS->isSingleDecl()) {
- if (auto Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
+ if (auto *Var = dyn_cast_or_null<VarDecl>(DS->getSingleDecl())) {
if (Var->hasInit() && !Var->getType()->isReferenceType()) {
// Accept non-canonical init form here but emit ext. warning.
if (Var->getInitStyle() != VarDecl::CInit && EmitDiags)
@@ -4155,10 +2993,10 @@ bool OpenMPIterationSpaceChecker::CheckInit(Stmt *S, bool EmitDiags) {
}
}
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getOperator() == OO_Equal) {
auto *LHS = CE->getArg(0);
- if (auto DRE = dyn_cast<DeclRefExpr>(LHS)) {
+ if (auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(DRE->getDecl()))
if (auto *ME = dyn_cast<MemberExpr>(getExprAsWritten(CED->getInit())))
return SetLCDeclAndLB(ME->getMemberDecl(), ME, CE->getArg(1));
@@ -4220,7 +3058,7 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
}
S = getExprAsWritten(S);
SourceLocation CondLoc = S->getLocStart();
- if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(S)) {
if (BO->isRelationalOp()) {
if (GetInitLCDecl(BO->getLHS()) == LCDecl)
return SetUB(BO->getRHS(),
@@ -4233,7 +3071,7 @@ bool OpenMPIterationSpaceChecker::CheckCond(Expr *S) {
(BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
BO->getSourceRange(), BO->getOperatorLoc());
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getNumArgs() == 2) {
auto Op = CE->getOperator();
switch (Op) {
@@ -4269,7 +3107,7 @@ bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) {
// var - incr
//
RHS = RHS->IgnoreParenImpCasts();
- if (auto BO = dyn_cast<BinaryOperator>(RHS)) {
+ if (auto *BO = dyn_cast<BinaryOperator>(RHS)) {
if (BO->isAdditiveOp()) {
bool IsAdd = BO->getOpcode() == BO_Add;
if (GetInitLCDecl(BO->getLHS()) == LCDecl)
@@ -4277,7 +3115,7 @@ bool OpenMPIterationSpaceChecker::CheckIncRHS(Expr *RHS) {
if (IsAdd && GetInitLCDecl(BO->getRHS()) == LCDecl)
return SetStep(BO->getLHS(), false);
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(RHS)) {
bool IsAdd = CE->getOperator() == OO_Plus;
if ((IsAdd || CE->getOperator() == OO_Minus) && CE->getNumArgs() == 2) {
if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
@@ -4317,14 +3155,15 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
IncrementSrcRange = S->getSourceRange();
S = S->IgnoreParens();
- if (auto UO = dyn_cast<UnaryOperator>(S)) {
+ if (auto *UO = dyn_cast<UnaryOperator>(S)) {
if (UO->isIncrementDecrementOp() &&
GetInitLCDecl(UO->getSubExpr()) == LCDecl)
- return SetStep(
- SemaRef.ActOnIntegerConstant(UO->getLocStart(),
- (UO->isDecrementOp() ? -1 : 1)).get(),
- false);
- } else if (auto BO = dyn_cast<BinaryOperator>(S)) {
+ return SetStep(SemaRef
+ .ActOnIntegerConstant(UO->getLocStart(),
+ (UO->isDecrementOp() ? -1 : 1))
+ .get(),
+ false);
+ } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
switch (BO->getOpcode()) {
case BO_AddAssign:
case BO_SubAssign:
@@ -4338,16 +3177,17 @@ bool OpenMPIterationSpaceChecker::CheckInc(Expr *S) {
default:
break;
}
- } else if (auto CE = dyn_cast<CXXOperatorCallExpr>(S)) {
+ } else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
switch (CE->getOperator()) {
case OO_PlusPlus:
case OO_MinusMinus:
if (GetInitLCDecl(CE->getArg(0)) == LCDecl)
- return SetStep(
- SemaRef.ActOnIntegerConstant(
- CE->getLocStart(),
- ((CE->getOperator() == OO_MinusMinus) ? -1 : 1)).get(),
- false);
+ return SetStep(SemaRef
+ .ActOnIntegerConstant(
+ CE->getLocStart(),
+ ((CE->getOperator() == OO_MinusMinus) ? -1 : 1))
+ .get(),
+ false);
break;
case OO_PlusEqual:
case OO_MinusEqual:
@@ -4544,7 +3384,7 @@ Expr *OpenMPIterationSpaceChecker::BuildPrivateCounterVar() const {
return nullptr;
}
-/// \brief Build initization of the counter be used for codegen.
+/// \brief Build initialization of the counter to be used for codegen.
Expr *OpenMPIterationSpaceChecker::BuildCounterInit() const { return LB; }
/// \brief Build step of the counter be used for codegen.
@@ -4615,7 +3455,7 @@ static bool CheckOpenMPIterationSpace(
llvm::MapVector<Expr *, DeclRefExpr *> &Captures) {
// OpenMP [2.6, Canonical Loop Form]
// for (init-expr; test-expr; incr-expr) structured-block
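  //   e.g. (editorial illustration, not from the patch):
  //   for (int i = 0; i < n; i += 2)   // init-expr; test-expr; incr-expr
  //     body(i);                       // structured-block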
- auto For = dyn_cast_or_null<ForStmt>(S);
+ auto *For = dyn_cast_or_null<ForStmt>(S);
if (!For) {
SemaRef.Diag(S->getLocStart(), diag::err_omp_not_for)
<< (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
@@ -4855,8 +3695,7 @@ BuildCounterUpdate(Sema &SemaRef, Scope *S, SourceLocation Loc,
/// \brief Convert integer expression \a E to make it have at least \a Bits
/// bits.
-static ExprResult WidenIterationCount(unsigned Bits, Expr *E,
- Sema &SemaRef) {
+static ExprResult WidenIterationCount(unsigned Bits, Expr *E, Sema &SemaRef) {
if (E == nullptr)
return ExprError();
auto &C = SemaRef.Context;
@@ -5014,15 +3853,17 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
auto PreCond = ExprResult(IterSpaces[0].PreCond);
auto N0 = IterSpaces[0].NumIterations;
ExprResult LastIteration32 = WidenIterationCount(
- 32 /* Bits */, SemaRef.PerformImplicitConversion(
- N0->IgnoreImpCasts(), N0->getType(),
- Sema::AA_Converting, /*AllowExplicit=*/true)
+ 32 /* Bits */, SemaRef
+ .PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
.get(),
SemaRef);
ExprResult LastIteration64 = WidenIterationCount(
- 64 /* Bits */, SemaRef.PerformImplicitConversion(
- N0->IgnoreImpCasts(), N0->getType(),
- Sema::AA_Converting, /*AllowExplicit=*/true)
+ 64 /* Bits */, SemaRef
+ .PerformImplicitConversion(
+ N0->IgnoreImpCasts(), N0->getType(),
+ Sema::AA_Converting, /*AllowExplicit=*/true)
.get(),
SemaRef);
@@ -5035,24 +3876,28 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Scope *CurScope = DSA.getCurScope();
for (unsigned Cnt = 1; Cnt < NestedLoopCount; ++Cnt) {
if (PreCond.isUsable()) {
- PreCond = SemaRef.BuildBinOp(CurScope, SourceLocation(), BO_LAnd,
- PreCond.get(), IterSpaces[Cnt].PreCond);
+ PreCond =
+ SemaRef.BuildBinOp(CurScope, PreCond.get()->getExprLoc(), BO_LAnd,
+ PreCond.get(), IterSpaces[Cnt].PreCond);
}
auto N = IterSpaces[Cnt].NumIterations;
+ SourceLocation Loc = N->getExprLoc();
AllCountsNeedLessThan32Bits &= C.getTypeSize(N->getType()) < 32;
if (LastIteration32.isUsable())
LastIteration32 = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Mul, LastIteration32.get(),
- SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
- Sema::AA_Converting,
- /*AllowExplicit=*/true)
+ CurScope, Loc, BO_Mul, LastIteration32.get(),
+ SemaRef
+ .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
.get());
if (LastIteration64.isUsable())
LastIteration64 = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Mul, LastIteration64.get(),
- SemaRef.PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
- Sema::AA_Converting,
- /*AllowExplicit=*/true)
+ CurScope, Loc, BO_Mul, LastIteration64.get(),
+ SemaRef
+ .PerformImplicitConversion(N->IgnoreImpCasts(), N->getType(),
+ Sema::AA_Converting,
+ /*AllowExplicit=*/true)
.get());
}
@@ -5083,7 +3928,8 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
ExprResult NumIterations = LastIteration;
{
LastIteration = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Sub, LastIteration.get(),
+ CurScope, LastIteration.get()->getExprLoc(), BO_Sub,
+ LastIteration.get(),
SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
if (!LastIteration.isUsable())
return 0;
@@ -5102,7 +3948,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Prepare SaveRef + 1.
NumIterations = SemaRef.BuildBinOp(
- CurScope, SourceLocation(), BO_Add, SaveRef.get(),
+ CurScope, SaveRef.get()->getExprLoc(), BO_Add, SaveRef.get(),
SemaRef.ActOnIntegerConstant(SourceLocation(), 1).get());
if (!NumIterations.isUsable())
return 0;
@@ -5110,7 +3956,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
SourceLocation InitLoc = IterSpaces[0].InitSrcRange.getBegin();
- // Build variables passed into runtime, nesessary for worksharing directives.
+ // Build variables passed into runtime, necessary for worksharing directives.
ExprResult LB, UB, IL, ST, EUB, PrevLB, PrevUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
isOpenMPDistributeDirective(DKind)) {
@@ -5146,7 +3992,7 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
/*DirectInit*/ false, /*TypeMayContainAuto*/ false);
// Build expression: UB = min(UB, LastIteration)
- // It is nesessary for CodeGen of directives with static scheduling.
+ // It is necessary for CodeGen of directives with static scheduling.
ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, InitLoc, BO_GT,
UB.get(), LastIteration.get());
ExprResult CondOp = SemaRef.ActOnConditionalOp(
@@ -5187,11 +4033,11 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
{
VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
- Expr *RHS = (isOpenMPWorksharingDirective(DKind) ||
- isOpenMPTaskLoopDirective(DKind) ||
- isOpenMPDistributeDirective(DKind))
- ? LB.get()
- : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
+ Expr *RHS =
+ (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
+ ? LB.get()
+ : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
Init = SemaRef.ActOnFinishFullExpr(Init.get());
}
@@ -5394,9 +4240,10 @@ CheckOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
assert(I->second == OO_Plus || I->second == OO_Minus);
BinaryOperatorKind BOK = (I->second == OO_Plus) ? BO_Add : BO_Sub;
- UpCounterVal =
- SemaRef.BuildBinOp(CurScope, I->first->getExprLoc(), BOK,
- UpCounterVal, NormalizedOffset).get();
+ UpCounterVal = SemaRef
+ .BuildBinOp(CurScope, I->first->getExprLoc(), BOK,
+ UpCounterVal, NormalizedOffset)
+ .get();
}
Multiplier = *ILM;
++I;
@@ -5491,7 +4338,7 @@ StmtResult Sema::ActOnOpenMPSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5530,7 +4377,7 @@ StmtResult Sema::ActOnOpenMPForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5567,7 +4414,7 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5592,9 +4439,9 @@ StmtResult Sema::ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
auto BaseStmt = AStmt;
- while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
+ while (auto *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
BaseStmt = CS->getCapturedStmt();
- if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
+ if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
auto S = C->children();
if (S.begin() == S.end())
return StmtError();
@@ -5769,7 +4616,7 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5811,7 +4658,7 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -5836,9 +4683,9 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
auto BaseStmt = AStmt;
- while (CapturedStmt *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
+ while (auto *CS = dyn_cast_or_null<CapturedStmt>(BaseStmt))
BaseStmt = CS->getCapturedStmt();
- if (auto C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
+ if (auto *C = dyn_cast_or_null<CompoundStmt>(BaseStmt)) {
auto S = C->children();
if (S.begin() == S.end())
return StmtError();
@@ -5872,7 +4719,7 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6169,21 +5016,21 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
AtomicCompAssignOp->getOpcode());
OpLoc = AtomicCompAssignOp->getOperatorLoc();
E = AtomicCompAssignOp->getRHS();
- X = AtomicCompAssignOp->getLHS();
+ X = AtomicCompAssignOp->getLHS()->IgnoreParens();
IsXLHSInRHSPart = true;
} else if (auto *AtomicBinOp = dyn_cast<BinaryOperator>(
AtomicBody->IgnoreParenImpCasts())) {
// Check for Binary Operation
- if(checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
+ if (checkBinaryOperation(AtomicBinOp, DiagId, NoteId))
return true;
- } else if (auto *AtomicUnaryOp =
- dyn_cast<UnaryOperator>(AtomicBody->IgnoreParenImpCasts())) {
+ } else if (auto *AtomicUnaryOp = dyn_cast<UnaryOperator>(
+ AtomicBody->IgnoreParenImpCasts())) {
// Check for Unary Operation
if (AtomicUnaryOp->isIncrementDecrementOp()) {
IsPostfixUpdate = AtomicUnaryOp->isPostfix();
Op = AtomicUnaryOp->isIncrementOp() ? BO_Add : BO_Sub;
OpLoc = AtomicUnaryOp->getOperatorLoc();
- X = AtomicUnaryOp->getSubExpr();
+ X = AtomicUnaryOp->getSubExpr()->IgnoreParens();
E = SemaRef.ActOnIntegerConstant(OpLoc, /*uint64_t Val=*/1).get();
IsXLHSInRHSPart = true;
} else {
@@ -6243,7 +5090,7 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (!AStmt)
return StmtError();
- auto CS = cast<CapturedStmt>(AStmt);
+ auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
// Structured block - An executable statement with a single entry at the
// top and a single exit at the bottom.
@@ -6311,8 +5158,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange ErrorRange, NoteRange;
// If clause is read:
// v = x;
- if (auto AtomicBody = dyn_cast<Expr>(Body)) {
- auto AtomicBinOp =
+ if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ auto *AtomicBinOp =
dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
X = AtomicBinOp->getRHS()->IgnoreParenImpCasts();
@@ -6373,8 +5220,8 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceRange ErrorRange, NoteRange;
// If clause is write:
// x = expr;
- if (auto AtomicBody = dyn_cast<Expr>(Body)) {
- auto AtomicBinOp =
+ if (auto *AtomicBody = dyn_cast<Expr>(Body)) {
+ auto *AtomicBinOp =
dyn_cast<BinaryOperator>(AtomicBody->IgnoreParenImpCasts());
if (AtomicBinOp && AtomicBinOp->getOpcode() == BO_Assign) {
X = AtomicBinOp->getLHS();
@@ -6692,7 +5539,7 @@ StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
if (auto *CS = dyn_cast<CompoundStmt>(S)) {
auto I = CS->body_begin();
while (I != CS->body_end()) {
- auto OED = dyn_cast<OMPExecutableDirective>(*I);
+ auto *OED = dyn_cast<OMPExecutableDirective>(*I);
if (!OED || !isOpenMPTeamsDirective(OED->getDirectiveKind())) {
OMPTeamsFound = false;
break;
@@ -6772,7 +5619,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -6810,8 +5657,8 @@ StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
// OpenMP [2.10.1, Restrictions, p. 97]
// At least one map clause must appear on the directive.
if (!HasMapClause(Clauses)) {
- Diag(StartLoc, diag::err_omp_no_map_for_directive) <<
- getOpenMPDirectiveName(OMPD_target_data);
+ Diag(StartLoc, diag::err_omp_no_map_for_directive)
+ << getOpenMPDirectiveName(OMPD_target_data);
return StmtError();
}
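  // Editorial illustration (assumed example): the diagnostic above fires for
  // '#pragma omp target data' written with no map clause; an accepted form:
  //   #pragma omp target data map(tofrom: a[0:n])
  //   { /* ... */ }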
@@ -7011,7 +5858,7 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -7192,7 +6039,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
if (!CurContext->isDependentContext()) {
// Finalize the clauses that need pre-built expressions for CodeGen.
for (auto C : Clauses) {
- if (auto LC = dyn_cast<OMPLinearClause>(C))
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
B.NumIterations, *this, CurScope,
DSAStack))
@@ -7207,6 +6054,327 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPTargetSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // If the 'collapse' clause specifies a number of loops, it defines how
+ // many nested loops are associated with the directive.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_target_simd, getCollapseNumberExpr(Clauses),
+ getOrderedNumberExpr(Clauses), AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp target simd loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTargetSimdDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B);
+}
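// Editorial usage sketch for the new combined directive (not part of the
// patch); note checkSimdlenSafelenSpecified above rejects a 'simdlen' value
// greater than 'safelen':
//   #pragma omp target simd safelen(8) simdlen(4) map(tofrom: a[0:n])
//   for (int i = 0; i < n; ++i)
//     a[i] += 1.0f;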
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // If the 'collapse' clause specifies a number of loops, it defines how
+ // many nested loops are associated with the directive.
+ unsigned NestedLoopCount =
+ CheckOpenMPLoop(OMPD_teams_distribute, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt,
+ *this, *DSAStack, VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp teams distribute loop exprs were not built");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
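// Editorial usage sketch (not part of the patch); 'ordered' is not a clause
// on distribute, hence the nullptr passed to CheckOpenMPLoop above:
//   #pragma omp target
//   #pragma omp teams distribute collapse(2)
//   for (int i = 0; i < n; ++i)
//     for (int j = 0; j < n; ++j)
//       a[i * n + j] = 0.0f;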
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // If the 'collapse' clause specifies a number of loops, it defines how
+ // many nested loops are associated with the directive.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_teams_distribute_simd, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp teams distribute simd loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // If the 'collapse' clause is present with a number of loops, that number
+ // defines how many nested loops are associated with the directive.
+ auto NestedLoopCount = CheckOpenMPLoop(
+ OMPD_teams_distribute_parallel_for_simd, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeParallelForSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // If the 'collapse' clause is present with a number of loops, that number
+ // defines how many nested loops are associated with the directive.
+ unsigned NestedLoopCount = CheckOpenMPLoop(
+ OMPD_teams_distribute_parallel_for, getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTeamsDistributeParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return OMPTargetTeamsDirective::Create(Context, StartLoc, EndLoc, Clauses,
+ AStmt);
+}
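+
+// Illustrative sketch (hypothetical user code): unlike the loop-based
+// handlers, 'target teams' has no associated loop nest, so no HelperExprs
+// are built here:
+//   #pragma omp target teams num_teams(4)
+//   { Work(); }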
+
+StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // If the 'collapse' clause is present with a number of loops, that number
+ // defines how many nested loops are associated with the directive.
+ auto NestedLoopCount = CheckOpenMPLoop(
+ OMPD_target_teams_distribute,
+ getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp target teams distribute loop exprs were not built");
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTargetTeamsDistributeDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ llvm::DenseMap<ValueDecl *, Expr *> &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ CapturedStmt *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ OMPLoopDirective::HelperExprs B;
+ // If the 'collapse' clause is present with a number of loops, that number
+ // defines how many nested loops are associated with the directive.
+ auto NestedLoopCount = CheckOpenMPLoop(
+ OMPD_target_teams_distribute_parallel_for,
+ getCollapseNumberExpr(Clauses),
+ nullptr /*ordered not a clause on distribute*/, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp target teams distribute parallel for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (auto C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+ return OMPTargetTeamsDistributeParallelForDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
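+
+// Illustrative sketch (hypothetical user code): the fully combined form
+//   #pragma omp target teams distribute parallel for map(tofrom: A[0:N])
+//   for (int i = 0; i < N; ++i)
+//     A[i] += 1;
+// reaches this handler with the 'map' components already recorded on the DSA
+// stack, which is what the map/data-sharing conflict checks below consult.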
+
OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -7683,8 +6851,8 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
Res = ActOnOpenMPDefaultmapClause(
static_cast<OpenMPDefaultmapClauseModifier>(Argument[Modifier]),
static_cast<OpenMPDefaultmapClauseKind>(Argument[DefaultmapKind]),
- StartLoc, LParenLoc, ArgumentLoc[Modifier],
- ArgumentLoc[DefaultmapKind], EndLoc);
+ StartLoc, LParenLoc, ArgumentLoc[Modifier], ArgumentLoc[DefaultmapKind],
+ EndLoc);
break;
case OMPC_final:
case OMPC_num_threads:
@@ -8025,7 +7193,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
Res = ActOnOpenMPFlushClause(VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_depend:
- Res = ActOnOpenMPDependClause(DepKind, DepLinMapLoc, ColonLoc, VarList,
+ Res = ActOnOpenMPDependClause(DepKind, DepLinMapLoc, ColonLoc, VarList,
StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
@@ -8207,12 +7375,13 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
continue;
}
+ auto CurrDir = DSAStack->getCurrentDirective();
// Variably modified types are not supported for tasks.
if (!Type->isAnyPointerType() && Type->isVariablyModifiedType() &&
- isOpenMPTaskingDirective(DSAStack->getCurrentDirective())) {
+ isOpenMPTaskingDirective(CurrDir)) {
Diag(ELoc, diag::err_omp_variably_modified_type_not_supported)
<< getOpenMPClauseName(OMPC_private) << Type
- << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ << getOpenMPDirectiveName(CurrDir);
bool IsDecl =
!VD ||
VD->isThisDeclarationADefinition(Context) == VarDecl::DeclarationOnly;
@@ -8225,14 +7394,22 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if (DSAStack->getCurrentDirective() == OMPD_target) {
+ if (CurrDir == OMPD_target || CurrDir == OMPD_target_parallel ||
+ CurrDir == OMPD_target_teams ||
+ CurrDir == OMPD_target_teams_distribute ||
+ CurrDir == OMPD_target_teams_distribute_parallel_for) {
+ OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
- VD, /* CurrentRegionOnly = */ true,
- [&](OMPClauseMappableExprCommon::MappableExprComponentListRef)
- -> bool { return true; })) {
- Diag(ELoc, diag::err_omp_variable_in_map_and_dsa)
+ VD, /*CurrentRegionOnly=*/true,
+ [&](OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind WhereFoundClauseKind) -> bool {
+ ConflictKind = WhereFoundClauseKind;
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(OMPC_private)
- << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ << getOpenMPClauseName(ConflictKind)
+ << getOpenMPDirectiveName(CurrDir);
ReportOriginalDSA(*this, DSAStack, D, DVar);
continue;
}
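+ // Illustrative sketch (hypothetical user code): this rejects, for example,
+ //   #pragma omp target map(tofrom : x) private(x)
+ // because 'x' appears in both a map clause and a data-sharing attribute
+ // clause on the same construct.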
@@ -8388,7 +7565,8 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// worksharing regions arising from the worksharing construct ever bind
// to any of the parallel regions arising from the parallel construct.
if (isOpenMPWorksharingDirective(CurrDir) &&
- !isOpenMPParallelDirective(CurrDir)) {
+ !isOpenMPParallelDirective(CurrDir) &&
+ !isOpenMPTeamsDirective(CurrDir)) {
DVar = DSAStack->getImplicitDSA(D, true);
if (DVar.CKind != OMPC_shared &&
(isOpenMPParallelDirective(DVar.DKind) ||
@@ -8476,13 +7654,21 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if (CurrDir == OMPD_target) {
+ if (CurrDir == OMPD_target || CurrDir == OMPD_target_parallel ||
+ CurrDir == OMPD_target_teams ||
+ CurrDir == OMPD_target_teams_distribute ||
+ CurrDir == OMPD_target_teams_distribute_parallel_for) {
+ OpenMPClauseKind ConflictKind;
if (DSAStack->checkMappableExprComponentListsForDecl(
- VD, /* CurrentRegionOnly = */ true,
- [&](OMPClauseMappableExprCommon::MappableExprComponentListRef)
- -> bool { return true; })) {
- Diag(ELoc, diag::err_omp_variable_in_map_and_dsa)
+ VD, /*CurrentRegionOnly=*/true,
+ [&](OMPClauseMappableExprCommon::MappableExprComponentListRef,
+ OpenMPClauseKind WhereFoundClauseKind) -> bool {
+ ConflictKind = WhereFoundClauseKind;
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(OMPC_firstprivate)
+ << getOpenMPClauseName(ConflictKind)
<< getOpenMPDirectiveName(DSAStack->getCurrentDirective());
ReportOriginalDSA(*this, DSAStack, D, DVar);
continue;
@@ -8645,7 +7831,8 @@ OMPClause *Sema::ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
// regions.
DSAStackTy::DSAVarData TopDVar = DVar;
if (isOpenMPWorksharingDirective(CurrDir) &&
- !isOpenMPParallelDirective(CurrDir)) {
+ !isOpenMPParallelDirective(CurrDir) &&
+ !isOpenMPTeamsDirective(CurrDir)) {
DVar = DSAStack->getImplicitDSA(D, true);
if (DVar.CKind != OMPC_shared) {
Diag(ELoc, diag::err_omp_required_access)
@@ -8884,7 +8071,7 @@ buildDeclareReductionRef(Sema &SemaRef, SourceLocation Loc, SourceRange Range,
cast_or_null<UnresolvedLookupExpr>(UnresolvedReduction)) {
Lookups.push_back(UnresolvedSet<8>());
Decl *PrevD = nullptr;
- for(auto *D : ULE->decls()) {
+ for (auto *D : ULE->decls()) {
if (D == PrevD)
Lookups.push_back(UnresolvedSet<8>());
else if (auto *DRD = cast<OMPDeclareReductionDecl>(D))
@@ -9175,7 +8362,8 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
// worksharing regions arising from the worksharing construct bind.
OpenMPDirectiveKind CurrDir = DSAStack->getCurrentDirective();
if (isOpenMPWorksharingDirective(CurrDir) &&
- !isOpenMPParallelDirective(CurrDir)) {
+ !isOpenMPParallelDirective(CurrDir) &&
+ !isOpenMPTeamsDirective(CurrDir)) {
DVar = DSAStack->getImplicitDSA(D, true);
if (DVar.CKind != OMPC_shared) {
Diag(ELoc, diag::err_omp_required_access)
@@ -9260,7 +8448,7 @@ OMPClause *Sema::ActOnOpenMPReductionClause(
if (OASE ||
(!ASE &&
D->getType().getNonReferenceType()->isVariablyModifiedType())) {
- // For arays/array sections only:
+ // For arrays/array sections only:
// Create pseudo array type for private copy. The size for this array will
// be generated during codegen.
// For array subscripts or single variables Private Ty is the same as Type
@@ -9738,7 +8926,7 @@ static bool FinishOpenMPLinearClause(OMPLinearClause &Clause, DeclRefExpr *IV,
Expr *InitExpr = *CurInit;
// Build privatized reference to the current linear var.
- auto DE = cast<DeclRefExpr>(SimpleRefExpr);
+ auto *DE = cast<DeclRefExpr>(SimpleRefExpr);
Expr *CapturedRef;
if (LinKind == OMPC_LINEAR_uval)
CapturedRef = cast<VarDecl>(DE->getDecl())->getInit();
@@ -10040,8 +9228,7 @@ OMPClause *Sema::ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
auto *DstVD =
buildVarDecl(*this, RefExpr->getLocStart(), Type, ".copyprivate.dst",
D->hasAttrs() ? &D->getAttrs() : nullptr);
- auto *PseudoDstExpr =
- buildDeclRefExpr(*this, DstVD, Type, ELoc);
+ auto *PseudoDstExpr = buildDeclRefExpr(*this, DstVD, Type, ELoc);
auto AssignmentOp = BuildBinOp(DSAStack->getCurScope(), ELoc, BO_Assign,
PseudoDstExpr, PseudoSrcExpr);
if (AssignmentOp.isInvalid())
@@ -10256,9 +9443,6 @@ static bool IsCXXRecordForMappable(Sema &SemaRef, SourceLocation Loc,
if (!RD || RD->isInvalidDecl())
return true;
- if (auto *CTSD = dyn_cast<ClassTemplateSpecializationDecl>(RD))
- if (auto *CTD = CTSD->getSpecializedTemplate())
- RD = CTD->getTemplatedDecl();
auto QTy = SemaRef.Context.getRecordType(RD);
if (RD->isDynamicClass()) {
SemaRef.Diag(Loc, diag::err_omp_not_mappable_type) << QTy;
@@ -10302,8 +9486,7 @@ static bool CheckTypeMappable(SourceLocation SL, SourceRange SR, Sema &SemaRef,
SemaRef.Diag(SL, diag::err_incomplete_type) << QTy << SR;
return false;
} else if (CXXRecordDecl *RD = dyn_cast_or_null<CXXRecordDecl>(ND)) {
- if (!RD->isInvalidDecl() &&
- !IsCXXRecordForMappable(SemaRef, SL, Stack, RD))
+ if (!RD->isInvalidDecl() && !IsCXXRecordForMappable(SemaRef, SL, Stack, RD))
return false;
}
return true;
@@ -10332,7 +9515,7 @@ static bool CheckArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
auto *Length = OASE->getLength();
// If there is a lower bound that does not evaluate to zero, we are not
- // convering the whole dimension.
+ // covering the whole dimension.
if (LowerBound) {
llvm::APSInt ConstLowerBound;
if (!LowerBound->EvaluateAsInt(ConstLowerBound, SemaRef.getASTContext()))
@@ -10367,8 +9550,8 @@ static bool CheckArrayExpressionDoesNotReferToWholeSize(Sema &SemaRef,
// section or array subscript) does NOT specify a single element of the array
// whose base type is \a BaseQTy.
static bool CheckArrayExpressionDoesNotReferToUnitySize(Sema &SemaRef,
- const Expr *E,
- QualType BaseQTy) {
+ const Expr *E,
+ QualType BaseQTy) {
auto *OASE = dyn_cast<OMPArraySectionExpr>(E);
// An array subscript always refers to a single element. Also, an array section
@@ -10574,16 +9757,16 @@ static Expr *CheckMapClauseExpressionBase(
bool NotUnity =
CheckArrayExpressionDoesNotReferToUnitySize(SemaRef, CurE, CurType);
- if (AllowWholeSizeArraySection && AllowUnitySizeArraySection) {
- // Any array section is currently allowed.
+ if (AllowWholeSizeArraySection) {
+ // Any array section is currently allowed. Allowing a whole size array
+ // section implies allowing a unity array section as well.
//
// If this array section refers to the whole dimension we can still
// accept other array sections before this one, except if the base is a
// pointer. Otherwise, only unitary sections are accepted.
if (NotWhole || IsPointer)
AllowWholeSizeArraySection = false;
- } else if ((AllowUnitySizeArraySection && NotUnity) ||
- (AllowWholeSizeArraySection && NotWhole)) {
+ } else if (AllowUnitySizeArraySection && NotUnity) {
// A unity or whole array section is not allowed and that is not
// compatible with the properties of the current array section.
SemaRef.Diag(
@@ -10634,7 +9817,8 @@ static bool CheckMapConflicts(
bool FoundError = DSAS->checkMappableExprComponentListsForDecl(
VD, CurrentRegionOnly,
[&](OMPClauseMappableExprCommon::MappableExprComponentListRef
- StackComponents) -> bool {
+ StackComponents,
+ OpenMPClauseKind) -> bool {
assert(!StackComponents.empty() &&
"Map clause expression with no components!");
@@ -10686,10 +9870,10 @@ static bool CheckMapConflicts(
for (; SI != SE; ++SI) {
QualType Type;
if (auto *ASE =
- dyn_cast<ArraySubscriptExpr>(SI->getAssociatedExpression())) {
+ dyn_cast<ArraySubscriptExpr>(SI->getAssociatedExpression())) {
Type = ASE->getBase()->IgnoreParenImpCasts()->getType();
- } else if (auto *OASE =
- dyn_cast<OMPArraySectionExpr>(SI->getAssociatedExpression())) {
+ } else if (auto *OASE = dyn_cast<OMPArraySectionExpr>(
+ SI->getAssociatedExpression())) {
auto *E = OASE->getBase()->IgnoreParenImpCasts();
Type =
OMPArraySectionExpr::getBaseOriginalType(E).getCanonicalType();
@@ -10989,11 +10173,14 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// OpenMP 4.5 [2.15.5.1, Restrictions, p.3]
// A list item cannot appear in both a map clause and a data-sharing
// attribute clause on the same construct
- if (DKind == OMPD_target && VD) {
+ if ((DKind == OMPD_target || DKind == OMPD_target_teams ||
+ DKind == OMPD_target_teams_distribute ||
+ DKind == OMPD_target_teams_distribute_parallel_for) && VD) {
auto DVar = DSAS->getTopDSA(VD, false);
if (isOpenMPPrivate(DVar.CKind)) {
- SemaRef.Diag(ELoc, diag::err_omp_variable_in_map_and_dsa)
+ SemaRef.Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
<< getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_map)
<< getOpenMPDirectiveName(DSAS->getCurrentDirective());
ReportOriginalDSA(SemaRef, DSAS, CurDeclaration, DVar);
continue;
@@ -11006,7 +10193,8 @@ checkMappableExpressionList(Sema &SemaRef, DSAStackTy *DSAS,
// Store the components in the stack so that they can be used to check
// against other clauses later on.
- DSAS->addMappableExpressionComponents(CurDeclaration, CurComponents);
+ DSAS->addMappableExpressionComponents(CurDeclaration, CurComponents,
+ /*WhereFoundClauseKind=*/OMPC_map);
// Save the components and declaration to create the clause. For purposes of
// the clause creation, any component list that has base 'this' uses
@@ -11274,7 +10462,7 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareReductionDirectiveEnd(
return DeclReductions;
}
-OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
+OMPClause *Sema::ActOnOpenMPNumTeamsClause(Expr *NumTeams,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
@@ -11301,8 +10489,8 @@ OMPClause *Sema::ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
/*StrictlyPositive=*/true))
return nullptr;
- return new (Context) OMPThreadLimitClause(ValExpr, StartLoc, LParenLoc,
- EndLoc);
+ return new (Context)
+ OMPThreadLimitClause(ValExpr, StartLoc, LParenLoc, EndLoc);
}
OMPClause *Sema::ActOnOpenMPPriorityClause(Expr *Priority,
@@ -11421,18 +10609,17 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc) {
// OpenMP 4.5 only supports 'defaultmap(tofrom: scalar)'
- if (M != OMPC_DEFAULTMAP_MODIFIER_tofrom ||
- Kind != OMPC_DEFAULTMAP_scalar) {
+ if (M != OMPC_DEFAULTMAP_MODIFIER_tofrom || Kind != OMPC_DEFAULTMAP_scalar) {
std::string Value;
SourceLocation Loc;
Value += "'";
if (M != OMPC_DEFAULTMAP_MODIFIER_tofrom) {
Value += getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- OMPC_DEFAULTMAP_MODIFIER_tofrom);
+ OMPC_DEFAULTMAP_MODIFIER_tofrom);
Loc = MLoc;
} else {
Value += getOpenMPSimpleClauseTypeName(OMPC_defaultmap,
- OMPC_DEFAULTMAP_scalar);
+ OMPC_DEFAULTMAP_scalar);
Loc = KindLoc;
}
Value += "'";
@@ -11469,11 +10656,11 @@ void Sema::ActOnFinishOpenMPDeclareTargetDirective() {
IsInOpenMPDeclareTargetContext = false;
}
-void
-Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
- const DeclarationNameInfo &Id,
- OMPDeclareTargetDeclAttr::MapTypeTy MT,
- NamedDeclSetType &SameDirectiveDecls) {
+void Sema::ActOnOpenMPDeclareTargetName(Scope *CurScope,
+ CXXScopeSpec &ScopeSpec,
+ const DeclarationNameInfo &Id,
+ OMPDeclareTargetDeclAttr::MapTypeTy MT,
+ NamedDeclSetType &SameDirectiveDecls) {
LookupResult Lookup(*this, Id, LookupOrdinaryName);
LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
@@ -11671,7 +10858,10 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- SmallVector<Expr *, 8> Vars;
+ MappableVarListInfo MVLI(VarList);
+ SmallVector<Expr *, 8> PrivateCopies;
+ SmallVector<Expr *, 8> Inits;
+
for (auto &RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP use_device_ptr clause.");
SourceLocation ELoc;
@@ -11680,43 +10870,89 @@ OMPClause *Sema::ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
- Vars.push_back(RefExpr);
+ MVLI.ProcessedVarList.push_back(RefExpr);
+ PrivateCopies.push_back(nullptr);
+ Inits.push_back(nullptr);
}
ValueDecl *D = Res.first;
if (!D)
continue;
QualType Type = D->getType();
- // item should be a pointer or reference to pointer
- if (!Type.getNonReferenceType()->isPointerType()) {
+ Type = Type.getNonReferenceType().getUnqualifiedType();
+
+ auto *VD = dyn_cast<VarDecl>(D);
+
+ // Item should be a pointer or reference to pointer.
+ if (!Type->isPointerType()) {
Diag(ELoc, diag::err_omp_usedeviceptr_not_a_pointer)
<< 0 << RefExpr->getSourceRange();
continue;
}
- Vars.push_back(RefExpr->IgnoreParens());
+
+ // Build the private variable and the expression that refers to it.
+ auto VDPrivate = buildVarDecl(*this, ELoc, Type, D->getName(),
+ D->hasAttrs() ? &D->getAttrs() : nullptr);
+ if (VDPrivate->isInvalidDecl())
+ continue;
+
+ CurContext->addDecl(VDPrivate);
+ auto VDPrivateRefExpr = buildDeclRefExpr(
+ *this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
+
+ // Add a temporary variable to initialize the private copy of the pointer.
+ auto *VDInit =
+ buildVarDecl(*this, RefExpr->getExprLoc(), Type, ".devptr.temp");
+ auto *VDInitRefExpr = buildDeclRefExpr(*this, VDInit, RefExpr->getType(),
+ RefExpr->getExprLoc());
+ AddInitializerToDecl(VDPrivate,
+ DefaultLvalueConversion(VDInitRefExpr).get(),
+ /*DirectInit=*/false, /*TypeMayContainAuto=*/false);
+
+ // If required, build a capture to implement the privatization initialized
+ // with the current list item value.
+ DeclRefExpr *Ref = nullptr;
+ if (!VD)
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ MVLI.ProcessedVarList.push_back(VD ? RefExpr->IgnoreParens() : Ref);
+ PrivateCopies.push_back(VDPrivateRefExpr);
+ Inits.push_back(VDInitRefExpr);
+
+ // We need to add a data sharing attribute for this variable to make sure it
+ // is correctly captured. A variable that shows up in a use_device_ptr has
+ // properties similar to those of a firstprivate variable.
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_firstprivate, Ref);
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component.
+ MVLI.VarBaseDeclarations.push_back(D);
+ MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
+ MVLI.VarComponents.back().push_back(
+ OMPClauseMappableExprCommon::MappableComponent(SimpleRefExpr, D));
}
- if (Vars.empty())
+ if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPUseDevicePtrClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars);
+ return OMPUseDevicePtrClause::Create(
+ Context, StartLoc, LParenLoc, EndLoc, MVLI.ProcessedVarList,
+ PrivateCopies, Inits, MVLI.VarBaseDeclarations, MVLI.VarComponents);
}
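+
+// Illustrative sketch (hypothetical user code): 'use_device_ptr' now
+// privatizes the listed pointer and initializes the private copy from its
+// device counterpart:
+//   int *p = AllocAndMap();
+//   #pragma omp target data map(to : p[0:N]) use_device_ptr(p)
+//   Launch(p); // inside the region, 'p' is the private copy built above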
OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
- SmallVector<Expr *, 8> Vars;
+ MappableVarListInfo MVLI(VarList);
for (auto &RefExpr : VarList) {
- assert(RefExpr && "NULL expr in OpenMP use_device_ptr clause.");
+ assert(RefExpr && "NULL expr in OpenMP is_device_ptr clause.");
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
if (Res.second) {
// It will be analyzed later.
- Vars.push_back(RefExpr);
+ MVLI.ProcessedVarList.push_back(RefExpr);
}
ValueDecl *D = Res.first;
if (!D)
@@ -11730,12 +10966,59 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
<< 0 << RefExpr->getSourceRange();
continue;
}
- Vars.push_back(RefExpr->IgnoreParens());
+
+ // Check that the declaration in the clause does not appear in any
+ // data-sharing attribute.
+ auto DVar = DSAStack->getTopDSA(D, false);
+ if (isOpenMPPrivate(DVar.CKind)) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_is_device_ptr)
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ ReportOriginalDSA(*this, DSAStack, D, DVar);
+ continue;
+ }
+
+ Expr *ConflictExpr;
+ if (DSAStack->checkMappableExprComponentListsForDecl(
+ D, /*CurrentRegionOnly=*/true,
+ [&ConflictExpr](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef R,
+ OpenMPClauseKind) -> bool {
+ ConflictExpr = R.front().getAssociatedExpression();
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << RefExpr->getSourceRange();
+ Diag(ConflictExpr->getExprLoc(), diag::note_used_here)
+ << ConflictExpr->getSourceRange();
+ continue;
+ }
+
+ // Store the components in the stack so that they can be used to check
+ // against other clauses later on.
+ OMPClauseMappableExprCommon::MappableComponent MC(SimpleRefExpr, D);
+ DSAStack->addMappableExpressionComponents(
+ D, MC, /*WhereFoundClauseKind=*/OMPC_is_device_ptr);
+
+ // Record the expression we've just processed.
+ MVLI.ProcessedVarList.push_back(SimpleRefExpr);
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component. We use a null declaration to signal fields in
+ // 'this'.
+ assert((isa<DeclRefExpr>(SimpleRefExpr) ||
+ isa<CXXThisExpr>(cast<MemberExpr>(SimpleRefExpr)->getBase())) &&
+ "Unexpected device pointer expression!");
+ MVLI.VarBaseDeclarations.push_back(
+ isa<DeclRefExpr>(SimpleRefExpr) ? D : nullptr);
+ MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
+ MVLI.VarComponents.back().push_back(MC);
}
- if (Vars.empty())
+ if (MVLI.ProcessedVarList.empty())
return nullptr;
- return OMPIsDevicePtrClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- Vars);
+ return OMPIsDevicePtrClause::Create(
+ Context, StartLoc, LParenLoc, EndLoc, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations, MVLI.VarComponents);
}
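+
+// Illustrative sketch (hypothetical user code): 'is_device_ptr' asserts that
+// a pointer already holds a device address, so the checks above reject it if
+// the same variable is also privatized or mapped on the construct:
+//   #pragma omp target is_device_ptr(devp)
+//   Use(devp);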
diff --git a/lib/Sema/SemaOverload.cpp b/lib/Sema/SemaOverload.cpp
index 40d6e910f1fb..47e3df20d911 100644
--- a/lib/Sema/SemaOverload.cpp
+++ b/lib/Sema/SemaOverload.cpp
@@ -39,8 +39,9 @@ using namespace clang;
using namespace sema;
static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
- return llvm::any_of(FD->parameters(),
- std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>));
+ return llvm::any_of(FD->parameters(), [](const ParmVarDecl *P) {
+ return P->hasAttr<PassObjectSizeAttr>();
+ });
}
/// A convenience routine for creating a decayed reference to a function.
@@ -59,6 +60,8 @@ CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
// being used.
if (FoundDecl != Fn && S.DiagnoseUseOfDecl(Fn, Loc))
return ExprError();
+ if (auto *FPT = Fn->getType()->getAs<FunctionProtoType>())
+ S.ResolveExceptionSpec(Loc, FPT);
DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, false, Fn->getType(),
VK_LValue, Loc, LocInfo);
if (HadMultipleCandidates)
@@ -135,7 +138,8 @@ ImplicitConversionRank clang::GetConversionRank(ImplicitConversionKind Kind) {
ICR_Exact_Match, // NOTE(gbiv): This may not be completely right --
// it was omitted by the patch that added
// ICK_Zero_Event_Conversion
- ICR_C_Conversion
+ ICR_C_Conversion,
+ ICR_C_Conversion_Extension
};
return Rank[(int)Kind];
}
@@ -148,7 +152,7 @@ static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Lvalue-to-rvalue",
"Array-to-pointer",
"Function-to-pointer",
- "Noreturn adjustment",
+ "Function pointer conversion",
"Qualification",
"Integral promotion",
"Floating point promotion",
@@ -169,7 +173,8 @@ static const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
"Transparent Union Conversion",
"Writeback conversion",
"OpenCL Zero Event Conversion",
- "C specific type conversion"
+ "C specific type conversion",
+ "Incompatible pointer conversion"
};
return Name[Kind];
}
@@ -324,6 +329,11 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
} else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
llvm::APSInt IntConstantValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+
+ // If it's value-dependent, we can't tell whether it's narrowing.
+ if (Initializer->isValueDependent())
+ return NK_Dependent_Narrowing;
+
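+ // Illustrative sketch (hypothetical user code): inside
+ //   template <int N> void f() { float x{N}; }
+ // the initializer is value-dependent, so whether int -> float narrows
+ // cannot be known until instantiation.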
if (Initializer &&
Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
// Convert the integer to the floating type.
@@ -357,6 +367,11 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
Ctx.getFloatingTypeOrder(FromType, ToType) == 1) {
// FromType is larger than ToType.
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+
+ // If it's value-dependent, we can't tell whether it's narrowing.
+ if (Initializer->isValueDependent())
+ return NK_Dependent_Narrowing;
+
if (Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) {
// Constant!
assert(ConstantValue.isFloat());
@@ -398,6 +413,11 @@ StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
// Not all values of FromType can be represented in ToType.
llvm::APSInt InitializerValue;
const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+
+ // If it's value-dependent, we can't tell whether it's narrowing.
+ if (Initializer->isValueDependent())
+ return NK_Dependent_Narrowing;
+
if (!Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
// Such conversions on variables are always narrowing.
return NK_Variable_Narrowing;
@@ -575,6 +595,7 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
case Sema::TDK_TooManyArguments:
case Sema::TDK_TooFewArguments:
case Sema::TDK_MiscellaneousDeductionFailure:
+ case Sema::TDK_CUDATargetMismatch:
Result.Data = nullptr;
break;
@@ -642,6 +663,7 @@ void DeductionFailureInfo::Destroy() {
case Sema::TDK_TooFewArguments:
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_FailedOverloadResolution:
+ case Sema::TDK_CUDATargetMismatch:
break;
case Sema::TDK_Inconsistent:
@@ -684,6 +706,7 @@ TemplateParameter DeductionFailureInfo::getTemplateParameter() {
case Sema::TDK_DeducedMismatch:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
+ case Sema::TDK_CUDATargetMismatch:
return TemplateParameter();
case Sema::TDK_Incomplete:
@@ -715,6 +738,7 @@ TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
case Sema::TDK_Underqualified:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_FailedOverloadResolution:
+ case Sema::TDK_CUDATargetMismatch:
return nullptr;
case Sema::TDK_DeducedMismatch:
@@ -742,6 +766,7 @@ const TemplateArgument *DeductionFailureInfo::getFirstArg() {
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_SubstitutionFailure:
case Sema::TDK_FailedOverloadResolution:
+ case Sema::TDK_CUDATargetMismatch:
return nullptr;
case Sema::TDK_Inconsistent:
@@ -769,6 +794,7 @@ const TemplateArgument *DeductionFailureInfo::getSecondArg() {
case Sema::TDK_InvalidExplicitArguments:
case Sema::TDK_SubstitutionFailure:
case Sema::TDK_FailedOverloadResolution:
+ case Sema::TDK_CUDATargetMismatch:
return nullptr;
case Sema::TDK_Inconsistent:
@@ -812,6 +838,7 @@ void OverloadCandidateSet::destroyCandidates() {
void OverloadCandidateSet::clear() {
destroyCandidates();
+ ConversionSequenceAllocator.Reset();
NumInlineSequences = 0;
Candidates.clear();
Functions.clear();
@@ -969,16 +996,23 @@ Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
Match = *I;
return Ovl_Match;
}
- } else if (isa<UsingDecl>(OldD)) {
+ } else if (isa<UsingDecl>(OldD) || isa<UsingPackDecl>(OldD)) {
// We can overload with these, which can show up when doing
// redeclaration checks for UsingDecls.
assert(Old.getLookupKind() == LookupUsingDeclName);
} else if (isa<TagDecl>(OldD)) {
// We can always overload with tags by hiding them.
- } else if (isa<UnresolvedUsingValueDecl>(OldD)) {
+ } else if (auto *UUD = dyn_cast<UnresolvedUsingValueDecl>(OldD)) {
// Optimistically assume that an unresolved using decl will
// overload; if it doesn't, we'll have to diagnose during
// template instantiation.
+ //
+ // Exception: if the scope is dependent and this is not a class
+ // member, the using declaration can only introduce an enumerator.
+ if (UUD->getQualifier()->isDependent() && !UUD->isCXXClassMember()) {
+ Match = *I;
+ return Ovl_NonFunction;
+ }
} else {
// (C++ 13p1):
// Only function declarations can be overloaded; object and type
@@ -1126,24 +1160,20 @@ bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
}
if (getLangOpts().CUDA && ConsiderCudaAttrs) {
+ // Don't allow overloading of destructors. (In theory we could, but it
+ // would be a giant change to clang.)
+ if (isa<CXXDestructorDecl>(New))
+ return false;
+
CUDAFunctionTarget NewTarget = IdentifyCUDATarget(New),
OldTarget = IdentifyCUDATarget(Old);
- if (NewTarget == CFT_InvalidTarget || NewTarget == CFT_Global)
+ if (NewTarget == CFT_InvalidTarget)
return false;
assert((OldTarget != CFT_InvalidTarget) && "Unexpected invalid target.");
- // Don't allow mixing of HD with other kinds. This guarantees that
- // we have only one viable function with this signature on any
- // side of CUDA compilation .
- // __global__ functions can't be overloaded based on attribute
- // difference because, like HD, they also exist on both sides.
- if ((NewTarget == CFT_HostDevice) || (OldTarget == CFT_HostDevice) ||
- (NewTarget == CFT_Global) || (OldTarget == CFT_Global))
- return false;
-
- // Allow overloading of functions with same signature, but
- // different CUDA target attributes.
+ // Allow overloading of functions with same signature and different CUDA
+ // target attributes.
return NewTarget != OldTarget;
}
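+
+ // Illustrative sketch (hypothetical user code): under the relaxed rule, a
+ // same-signature pair such as
+ //   __host__ void f();
+ //   __device__ void f();
+ // overloads purely on target; destructors are excluded above.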
@@ -1199,7 +1229,6 @@ TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
case OR_Success:
case OR_Deleted:
ICS.setUserDefined();
- ICS.UserDefined.Before.setAsIdentityConversion();
// C++ [over.ics.user]p4:
// A conversion of an expression of class type to the same class
// type is given Exact Match rank, and a conversion of an
@@ -1383,17 +1412,20 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
/// \brief Determine whether the conversion from FromType to ToType is a valid
-/// conversion that strips "noreturn" off the nested function type.
-bool Sema::IsNoReturnConversion(QualType FromType, QualType ToType,
+/// conversion that strips "noexcept" or "noreturn" off the nested function
+/// type.
+bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy) {
if (Context.hasSameUnqualifiedType(FromType, ToType))
return false;
// Permit the conversion F(t __attribute__((noreturn))) -> F(t)
+ // or F(t noexcept) -> F(t)
// where F adds one of the following at most once:
// - a pointer
// - a member pointer
// - a block pointer
+ // Changes here need matching changes in FindCompositePointerType.
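+ // Illustrative sketch (hypothetical user code):
+ //   void g() noexcept;
+ //   void (*p)() = &g; // OK: strips 'noexcept' from the function type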
CanQualType CanTo = Context.getCanonicalType(ToType);
CanQualType CanFrom = Context.getCanonicalType(FromType);
Type::TypeClass TyClass = CanTo->getTypeClass();
@@ -1406,8 +1438,13 @@ bool Sema::IsNoReturnConversion(QualType FromType, QualType ToType,
CanTo = CanTo.getAs<BlockPointerType>()->getPointeeType();
CanFrom = CanFrom.getAs<BlockPointerType>()->getPointeeType();
} else if (TyClass == Type::MemberPointer) {
- CanTo = CanTo.getAs<MemberPointerType>()->getPointeeType();
- CanFrom = CanFrom.getAs<MemberPointerType>()->getPointeeType();
+ auto ToMPT = CanTo.getAs<MemberPointerType>();
+ auto FromMPT = CanFrom.getAs<MemberPointerType>();
+ // A function pointer conversion cannot change the class of the function.
+ if (ToMPT->getClass() != FromMPT->getClass())
+ return false;
+ CanTo = ToMPT->getPointeeType();
+ CanFrom = FromMPT->getPointeeType();
} else {
return false;
}
@@ -1418,11 +1455,37 @@ bool Sema::IsNoReturnConversion(QualType FromType, QualType ToType,
return false;
}
- const FunctionType *FromFn = cast<FunctionType>(CanFrom);
- FunctionType::ExtInfo EInfo = FromFn->getExtInfo();
- if (!EInfo.getNoReturn()) return false;
+ const auto *FromFn = cast<FunctionType>(CanFrom);
+ FunctionType::ExtInfo FromEInfo = FromFn->getExtInfo();
+
+ const auto *ToFn = cast<FunctionType>(CanTo);
+ FunctionType::ExtInfo ToEInfo = ToFn->getExtInfo();
+
+ bool Changed = false;
+
+ // Drop 'noreturn' if not present in target type.
+ if (FromEInfo.getNoReturn() && !ToEInfo.getNoReturn()) {
+ FromFn = Context.adjustFunctionType(FromFn, FromEInfo.withNoReturn(false));
+ Changed = true;
+ }
+
+ // Drop 'noexcept' if not present in target type.
+ if (const auto *FromFPT = dyn_cast<FunctionProtoType>(FromFn)) {
+ const auto *ToFPT = cast<FunctionProtoType>(ToFn);
+ if (FromFPT->isNothrow(Context) && !ToFPT->isNothrow(Context)) {
+ FromFn = cast<FunctionType>(
+ Context.getFunctionType(FromFPT->getReturnType(),
+ FromFPT->getParamTypes(),
+ FromFPT->getExtProtoInfo().withExceptionSpec(
+ FunctionProtoType::ExceptionSpecInfo()))
+ .getTypePtr());
+ Changed = true;
+ }
+ }
+
+ if (!Changed)
+ return false;
- FromFn = Context.adjustFunctionType(FromFn, EInfo.withNoReturn(false));
assert(QualType(FromFn, 0).isCanonical());
if (QualType(FromFn, 0) != CanTo) return false;
@@ -1527,7 +1590,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
S.ExtractUnqualifiedFunctionType(ToType), FromType)) {
QualType resultTy;
// if the function type matches except for [[noreturn]], it's ok
- if (!S.IsNoReturnConversion(FromType,
+ if (!S.IsFunctionConversion(FromType,
S.ExtractUnqualifiedFunctionType(ToType), resultTy))
// otherwise, only a boolean conversion is standard
if (!ToType->isBooleanType())
@@ -1556,6 +1619,8 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
}
// Check that we've computed the proper type after overload resolution.
+ // FIXME: FixOverloadedFunctionReference has side-effects; we shouldn't
+ // be calling it from within an NDEBUG block.
assert(S.Context.hasSameType(
FromType,
S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType()));
@@ -1684,7 +1749,7 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
ToType == S.Context.Float128Ty));
if (Float128AndLongDouble &&
(&S.Context.getFloatTypeSemantics(S.Context.LongDoubleTy) !=
- &llvm::APFloat::IEEEdouble))
+ &llvm::APFloat::IEEEdouble()))
return false;
}
// Floating point conversions (C++ 4.8).
@@ -1720,9 +1785,6 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
// Compatible conversions (Clang extension for C function overloading)
SCS.Second = ICK_Compatible_Conversion;
FromType = ToType.getUnqualifiedType();
- } else if (S.IsNoReturnConversion(FromType, ToType, FromType)) {
- // Treat a conversion that strips "noreturn" as an identity conversion.
- SCS.Second = ICK_NoReturn_Adjustment;
} else if (IsTransparentUnionStandardConversion(S, From, ToType,
InOverloadResolution,
SCS, CStyle)) {
@@ -1738,40 +1800,47 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
From->EvaluateKnownConstInt(S.getASTContext()) == 0) {
SCS.Second = ICK_Zero_Event_Conversion;
FromType = ToType;
+ } else if (ToType->isQueueT() &&
+ From->isIntegerConstantExpr(S.getASTContext()) &&
+ (From->EvaluateKnownConstInt(S.getASTContext()) == 0)) {
+ SCS.Second = ICK_Zero_Queue_Conversion;
+ FromType = ToType;
} else {
// No second conversion required.
SCS.Second = ICK_Identity;
}
SCS.setToType(1, FromType);
- QualType CanonFrom;
- QualType CanonTo;
- // The third conversion can be a qualification conversion (C++ 4p1).
+ // The third conversion can be a function pointer conversion or a
+ // qualification conversion (C++ [conv.fctptr], [conv.qual]).
bool ObjCLifetimeConversion;
- if (S.IsQualificationConversion(FromType, ToType, CStyle,
- ObjCLifetimeConversion)) {
+ if (S.IsFunctionConversion(FromType, ToType, FromType)) {
+ // Function pointer conversions (removing 'noexcept') including removal of
+ // 'noreturn' (Clang extension).
+ SCS.Third = ICK_Function_Conversion;
+ } else if (S.IsQualificationConversion(FromType, ToType, CStyle,
+ ObjCLifetimeConversion)) {
SCS.Third = ICK_Qualification;
SCS.QualificationIncludesObjCLifetime = ObjCLifetimeConversion;
FromType = ToType;
- CanonFrom = S.Context.getCanonicalType(FromType);
- CanonTo = S.Context.getCanonicalType(ToType);
} else {
// No conversion required
SCS.Third = ICK_Identity;
+ }
- // C++ [over.best.ics]p6:
- // [...] Any difference in top-level cv-qualification is
- // subsumed by the initialization itself and does not constitute
- // a conversion. [...]
- CanonFrom = S.Context.getCanonicalType(FromType);
- CanonTo = S.Context.getCanonicalType(ToType);
- if (CanonFrom.getLocalUnqualifiedType()
- == CanonTo.getLocalUnqualifiedType() &&
- CanonFrom.getLocalQualifiers() != CanonTo.getLocalQualifiers()) {
- FromType = ToType;
- CanonFrom = CanonTo;
- }
+ // C++ [over.best.ics]p6:
+ // [...] Any difference in top-level cv-qualification is
+ // subsumed by the initialization itself and does not constitute
+ // a conversion. [...]
+ QualType CanonFrom = S.Context.getCanonicalType(FromType);
+ QualType CanonTo = S.Context.getCanonicalType(ToType);
+ if (CanonFrom.getLocalUnqualifiedType()
+ == CanonTo.getLocalUnqualifiedType() &&
+ CanonFrom.getLocalQualifiers() != CanonTo.getLocalQualifiers()) {
+ FromType = ToType;
+ CanonFrom = CanonTo;
}
+
SCS.setToType(2, FromType);
if (CanonFrom == CanonTo)
@@ -1783,22 +1852,43 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
return false;
ExprResult ER = ExprResult{From};
- auto Conv = S.CheckSingleAssignmentConstraints(ToType, ER,
- /*Diagnose=*/false,
- /*DiagnoseCFAudited=*/false,
- /*ConvertRHS=*/false);
- if (Conv != Sema::Compatible)
+ Sema::AssignConvertType Conv =
+ S.CheckSingleAssignmentConstraints(ToType, ER,
+ /*Diagnose=*/false,
+ /*DiagnoseCFAudited=*/false,
+ /*ConvertRHS=*/false);
+ ImplicitConversionKind SecondConv;
+ switch (Conv) {
+ case Sema::Compatible:
+ SecondConv = ICK_C_Only_Conversion;
+ break;
+ // For our purposes, discarding qualifiers is just as bad as using an
+ // incompatible pointer. Note that an IncompatiblePointer conversion can drop
+ // qualifiers, as well.
+ case Sema::CompatiblePointerDiscardsQualifiers:
+ case Sema::IncompatiblePointer:
+ case Sema::IncompatiblePointerSign:
+ SecondConv = ICK_Incompatible_Pointer_Conversion;
+ break;
+ default:
return false;
+ }
+
+ // First can only be an lvalue conversion, so we pretend that this was the
+ // second conversion. First should already be valid from earlier in the
+ // function.
+ SCS.Second = SecondConv;
+ SCS.setToType(1, ToType);
- SCS.setAllToTypes(ToType);
- // We need to set all three because we want this conversion to rank terribly,
- // and we don't know what conversions it may overlap with.
- SCS.First = ICK_C_Only_Conversion;
- SCS.Second = ICK_C_Only_Conversion;
- SCS.Third = ICK_C_Only_Conversion;
+ // Third is Identity, because Second should rank us worse than any other
+ // conversion. This could also be ICK_Qualification, but it's simpler to just
+ // lump everything in with the second conversion, and we don't gain anything
+ // from making this ICK_Qualification.
+ SCS.Third = ICK_Identity;
+ SCS.setToType(2, ToType);
return true;
}
-
+
static bool
IsTransparentUnionStandardConversion(Sema &S, Expr* From,
QualType &ToType,
@@ -2587,7 +2677,8 @@ enum {
ft_parameter_arity,
ft_parameter_mismatch,
ft_return_type,
- ft_qualifer_mismatch
+ ft_qualifer_mismatch,
+ ft_noexcept
};
/// Attempts to get the FunctionProtoType from a Type. Handles
@@ -2687,6 +2778,16 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
return;
}
+ // Handle exception specification differences on canonical type (in C++17
+ // onwards).
+ if (cast<FunctionProtoType>(FromFunction->getCanonicalTypeUnqualified())
+ ->isNothrow(Context) !=
+ cast<FunctionProtoType>(ToFunction->getCanonicalTypeUnqualified())
+ ->isNothrow(Context)) {
+ PDiag << ft_noexcept;
+ return;
+ }
+
// Unable to find a difference, so add no extra info.
PDiag << ft_default;
}
@@ -4098,6 +4199,7 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
DerivedToBase = false;
ObjCConversion = false;
ObjCLifetimeConversion = false;
+ QualType ConvertedT2;
if (UnqualT1 == UnqualT2) {
// Nothing to do.
} else if (isCompleteType(Loc, OrigT2) &&
@@ -4108,6 +4210,15 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
UnqualT2->isObjCObjectOrInterfaceType() &&
Context.canBindObjCObjectType(UnqualT1, UnqualT2))
ObjCConversion = true;
+ else if (UnqualT2->isFunctionType() &&
+ IsFunctionConversion(UnqualT2, UnqualT1, ConvertedT2))
+ // C++1z [dcl.init.ref]p4:
+ // "cv1 T1" is reference-compatible with "cv2 T2" if [...] T2 is "noexcept
+ // function" and T1 is "function"
+ //
+ // We extend this to also apply to 'noreturn', so allow any function
+ // conversion between function types.
+ return Ref_Compatible;
else
return Ref_Incompatible;
@@ -4146,10 +4257,8 @@ Sema::CompareReferenceRelationship(SourceLocation Loc,
T1Quals.removeUnaligned();
T2Quals.removeUnaligned();
- if (T1Quals == T2Quals)
+ if (T1Quals.compatiblyIncludes(T2Quals))
return Ref_Compatible;
- else if (T1Quals.compatiblyIncludes(T2Quals))
- return Ref_Compatible_With_Added_Qualification;
else
return Ref_Related;
}
@@ -4327,8 +4436,7 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
// reference-compatible with "cv2 T2," or
//
// Per C++ [over.ics.ref]p4, we don't check the bit-field property here.
- if (InitCategory.isLValue() &&
- RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
+ if (InitCategory.isLValue() && RefRelationship == Sema::Ref_Compatible) {
// C++ [over.ics.ref]p1:
// When a parameter of reference type binds directly (8.5.3)
// to an argument expression, the implicit conversion sequence
@@ -4390,10 +4498,10 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
//
// -- is an xvalue, class prvalue, array prvalue or function
// lvalue and "cv1 T1" is reference-compatible with "cv2 T2", or
- if (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification &&
+ if (RefRelationship == Sema::Ref_Compatible &&
(InitCategory.isXValue() ||
- (InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) ||
- (InitCategory.isLValue() && T2->isFunctionType()))) {
+ (InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) ||
+ (InitCategory.isLValue() && T2->isFunctionType()))) {
ICS.setStandard();
ICS.Standard.First = ICK_Identity;
ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
@@ -4540,7 +4648,6 @@ TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
return ICS;
}
- ICS.UserDefined.Before.setAsIdentityConversion();
ICS.UserDefined.After.ReferenceBinding = true;
ICS.UserDefined.After.IsLvalueReference = !isRValRef;
ICS.UserDefined.After.BindsToFunctionLvalue = false;
@@ -4693,6 +4800,9 @@ TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
// Type is an aggregate, argument is an init list. At this point it comes
// down to checking whether the initialization works.
// FIXME: Find out whether this parameter is consumed or not.
+ // FIXME: Expose SemaInit's aggregate initialization code so that we don't
+ // need to call into the initialization code here; overload resolution
+ // should not be doing that.
InitializedEntity Entity =
InitializedEntity::InitializeParameter(S.Context, ToType,
/*Consumed=*/false);
@@ -4896,7 +5006,7 @@ TryObjectArgumentInitialization(Sema &S, SourceLocation Loc, QualType FromType,
// cv-qualification on the member function declaration.
//
// However, when finding an implicit conversion sequence for the argument, we
- // are not allowed to create temporaries or perform user-defined conversions
+ // are not allowed to perform user-defined conversions
// (C++ [over.match.funcs]p5). We perform a simplified version of
// reference binding here, that allows class rvalues to bind to
// non-constant references.
@@ -5069,9 +5179,10 @@ static bool CheckConvertedConstantConversions(Sema &S,
// conversions are fine.
switch (SCS.Second) {
case ICK_Identity:
- case ICK_NoReturn_Adjustment:
+ case ICK_Function_Conversion:
case ICK_Integral_Promotion:
case ICK_Integral_Conversion: // Narrowing conversions are checked elsewhere.
+ case ICK_Zero_Queue_Conversion:
return true;
case ICK_Boolean_Conversion:
@@ -5106,6 +5217,7 @@ static bool CheckConvertedConstantConversions(Sema &S,
case ICK_Writeback_Conversion:
case ICK_Zero_Event_Conversion:
case ICK_C_Only_Conversion:
+ case ICK_Incompatible_Pointer_Conversion:
return false;
case ICK_Lvalue_To_Rvalue:
@@ -5141,12 +5253,18 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// implicitly converted to type T, where the converted
// expression is a constant expression and the implicit conversion
// sequence contains only [... list of conversions ...].
+ // C++1z [stmt.if]p2:
+ // If the if statement is of the form if constexpr, the value of the
+ // condition shall be a contextually converted constant expression of type
+ // bool.
ImplicitConversionSequence ICS =
- TryCopyInitialization(S, From, T,
- /*SuppressUserConversions=*/false,
- /*InOverloadResolution=*/false,
- /*AllowObjcWritebackConversion=*/false,
- /*AllowExplicit=*/false);
+ CCE == Sema::CCEK_ConstexprIf
+ ? TryContextuallyConvertToBool(S, From)
+ : TryCopyInitialization(S, From, T,
+ /*SuppressUserConversions=*/false,
+ /*InOverloadResolution=*/false,
+ /*AllowObjcWritebackConversion=*/false,
+ /*AllowExplicit=*/false);
StandardConversionSequence *SCS = nullptr;
switch (ICS.getKind()) {
case ImplicitConversionSequence::StandardConversion:
@@ -5192,6 +5310,9 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
QualType PreNarrowingType;
switch (SCS->getNarrowingKind(S.Context, Result.get(), PreNarrowingValue,
PreNarrowingType)) {
+ case NK_Dependent_Narrowing:
+ // Implicit conversion to a narrower type, but the expression is
+ // value-dependent so we can't tell whether it's actually narrowing.
case NK_Variable_Narrowing:
// Implicit conversion to a narrower type, and the value is not a constant
// expression. We'll diagnose this in a moment.
@@ -5210,6 +5331,11 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
break;
}
+ if (Result.get()->isValueDependent()) {
+ Value = APValue();
+ return Result;
+ }
+
// Check the expression is a constant expression.
SmallVector<PartialDiagnosticAt, 8> Notes;
Expr::EvalResult Eval;
@@ -5256,7 +5382,7 @@ ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
APValue V;
auto R = ::CheckConvertedConstantExpression(*this, From, T, V, CCE, true);
- if (!R.isInvalid())
+ if (!R.isInvalid() && !R.get()->isValueDependent())
Value = V.getInt();
return R;
}
@@ -5310,6 +5436,7 @@ TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
/// PerformContextuallyConvertToObjCPointer - Perform a contextual
/// conversion of the expression From to an Objective-C pointer type.
+/// Returns a valid but null ExprResult if no conversion sequence exists.
ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
if (checkPlaceholderForOverload(*this, From))
return ExprError();
@@ -5319,7 +5446,7 @@ ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
TryContextuallyConvertToObjCPointer(*this, From);
if (!ICS.isBad())
return PerformImplicitConversion(From, Ty, ICS, AA_Converting);
- return ExprError();
+ return ExprResult();
}
/// Determine whether the provided type is an integral type, or an enumeration
@@ -5817,7 +5944,7 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
// case we may not yet know what the member's target is; the target is
// inferred for the member automatically, based on the bases and fields of
// the class.
- if (!Caller->isImplicit() && CheckCUDATarget(Caller, Function)) {
+ if (!Caller->isImplicit() && !IsAllowedCUDACall(Caller, Function)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
return;
@@ -5858,6 +5985,12 @@ Sema::AddOverloadCandidate(FunctionDecl *Function,
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
+
+ if (LangOpts.OpenCL && isOpenCLDisabledDecl(Function)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_ext_disabled;
+ return;
+ }
}
ObjCMethodDecl *
@@ -5907,10 +6040,15 @@ Sema::SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance,
/*AllowObjCWritebackConversion=*/
getLangOpts().ObjCAutoRefCount,
/*AllowExplicit*/false);
- if (ConversionState.isBad()) {
- Match = false;
- break;
- }
+ // This function looks for a reasonably-exact match, so we consider
+ // incompatible pointer conversions to be a failure here.
+ if (ConversionState.isBad() ||
+ (ConversionState.isStandard() &&
+ ConversionState.Standard.Second ==
+ ICK_Incompatible_Pointer_Conversion)) {
+ Match = false;
+ break;
+ }
}
// Promote additional arguments to variadic methods.
if (Match && Method->isVariadic()) {
@@ -5975,7 +6113,7 @@ EnableIfAttr *Sema::CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
SmallVector<Expr *, 16> ConvertedArgs;
bool InitializationFailed = false;
- // Ignore any variadic parameters. Converting them is pointless, since the
+ // Ignore any variadic arguments. Converting them is pointless, since the
// user can't refer to them in the enable_if condition.
unsigned ArgSizeNoVarargs = std::min(Function->param_size(), Args.size());
@@ -6198,7 +6336,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
- if (CheckCUDATarget(Caller, Method)) {
+ if (!IsAllowedCUDACall(Caller, Method)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
return;
@@ -7538,12 +7676,12 @@ public:
}
// C++ [over.match.oper]p16:
- // For every pointer to member type T, there exist candidate operator
- // functions of the form
+ // For every pointer to member type T or type std::nullptr_t, there
+ // exist candidate operator functions of the form
//
// bool operator==(T,T);
// bool operator!=(T,T);
- void addEqualEqualOrNotEqualMemberPointerOverloads() {
+ void addEqualEqualOrNotEqualMemberPointerOrNullptrOverloads() {
/// Set of (canonical) types that we've already handled.
llvm::SmallPtrSet<QualType, 8> AddedTypes;
@@ -7560,13 +7698,22 @@ public:
QualType ParamTypes[2] = { *MemPtr, *MemPtr };
S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet);
}
+
+ if (CandidateTypes[ArgIdx].hasNullPtrType()) {
+ CanQualType NullPtrTy = S.Context.getCanonicalType(S.Context.NullPtrTy);
+ if (AddedTypes.insert(NullPtrTy).second) {
+ QualType ParamTypes[2] = { NullPtrTy, NullPtrTy };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args,
+ CandidateSet);
+ }
+ }
}
}
// C++ [over.built]p15:
//
- // For every T, where T is an enumeration type, a pointer type, or
- // std::nullptr_t, there exist candidate operator functions of the form
+ // For every T, where T is an enumeration type or a pointer type,
+ // there exist candidate operator functions of the form
//
// bool operator<(T, T);
// bool operator>(T, T);
@@ -7651,17 +7798,6 @@ public:
QualType ParamTypes[2] = { *Enum, *Enum };
S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, CandidateSet);
}
-
- if (CandidateTypes[ArgIdx].hasNullPtrType()) {
- CanQualType NullPtrTy = S.Context.getCanonicalType(S.Context.NullPtrTy);
- if (AddedTypes.insert(NullPtrTy).second &&
- !UserDefinedBinaryOperators.count(std::make_pair(NullPtrTy,
- NullPtrTy))) {
- QualType ParamTypes[2] = { NullPtrTy, NullPtrTy };
- S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args,
- CandidateSet);
- }
- }
}
}
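Net effect of this pair of hunks: the builtin == and != candidates now cover std::nullptr_t, while the builtin relational candidates for it are dropped. A minimal sketch of the resulting behavior (illustrative, not from this patch):

    bool eq = (nullptr == nullptr);   // OK: builtin operator==(std::nullptr_t, std::nullptr_t)
    bool ne = (nullptr != nullptr);   // OK: builtin operator!=
    // bool lt = (nullptr < nullptr); // no builtin candidate anymore; ill-formed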
@@ -8357,7 +8493,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_EqualEqual:
case OO_ExclaimEqual:
- OpBuilder.addEqualEqualOrNotEqualMemberPointerOverloads();
+ OpBuilder.addEqualEqualOrNotEqualMemberPointerOrNullptrOverloads();
// Fall through.
case OO_Less:
@@ -8566,13 +8702,40 @@ bool clang::isBetterOverloadCandidate(Sema &S, const OverloadCandidate &Cand1,
if (Cand1.IgnoreObjectArgument || Cand2.IgnoreObjectArgument)
StartArg = 1;
+ auto IsIllFormedConversion = [&](const ImplicitConversionSequence &ICS) {
+ // We don't allow incompatible pointer conversions in C++.
+ if (!S.getLangOpts().CPlusPlus)
+ return ICS.isStandard() &&
+ ICS.Standard.Second == ICK_Incompatible_Pointer_Conversion;
+
+ // The only ill-formed conversion we allow in C++ is the string literal to
+ // char* conversion, which is only considered ill-formed in C++11 and later.
+ return S.getLangOpts().CPlusPlus11 && !S.getLangOpts().WritableStrings &&
+ hasDeprecatedStringLiteralToCharPtrConversion(ICS);
+ };
+
+ // Treat functions that don't require ill-formed conversions for a given
+ // argument as better candidates than functions that do.
+ unsigned NumArgs = Cand1.NumConversions;
+ assert(Cand2.NumConversions == NumArgs && "Overload candidate mismatch");
+ bool HasBetterConversion = false;
+ for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
+ bool Cand1Bad = IsIllFormedConversion(Cand1.Conversions[ArgIdx]);
+ bool Cand2Bad = IsIllFormedConversion(Cand2.Conversions[ArgIdx]);
+ if (Cand1Bad != Cand2Bad) {
+ if (Cand1Bad)
+ return false;
+ HasBetterConversion = true;
+ }
+ }
+
+ if (HasBetterConversion)
+ return true;
+
// C++ [over.match.best]p1:
// A viable function F1 is defined to be a better function than another
// viable function F2 if for all arguments i, ICSi(F1) is not a worse
// conversion sequence than ICSi(F2), and then...
- unsigned NumArgs = Cand1.NumConversions;
- assert(Cand2.NumConversions == NumArgs && "Overload candidate mismatch");
- bool HasBetterConversion = false;
for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
switch (CompareImplicitConversionSequences(S, Loc,
Cand1.Conversions[ArgIdx],
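The new pre-pass above makes a candidate that needs an ill-formed (but still accepted) conversion lose to one that does not, before the usual [over.match.best] comparison runs. A minimal sketch of the user-visible effect in C++11, assuming the default (non -fwritable-strings) mode:

    void f(char *);        // needs the deprecated string-literal-to-char* conversion
    void f(const char *);  // well-formed match
    int main() { f("hello"); }  // now resolves to f(const char *)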
@@ -8774,8 +8937,8 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
std::transform(begin(), end(), std::back_inserter(Candidates),
[](OverloadCandidate &Cand) { return &Cand; });
- // [CUDA] HD->H or HD->D calls are technically not allowed by CUDA
- // but accepted by both clang and NVCC. However during a particular
+ // [CUDA] HD->H or HD->D calls are technically not allowed by CUDA but
+ // are accepted by both clang and NVCC. However, during a particular
// compilation mode only one call variant is viable. We need to
// exclude non-viable overload candidates from consideration based
// only on their host/device attributes. Specifically, if one
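A hypothetical CUDA sketch of the situation this comment describes:

    __host__ void g(int);
    __device__ void g(int);
    __host__ __device__ void hd() {
      g(0);  // host pass resolves to the __host__ g, device pass to the
             // __device__ g; the wrong-side candidate is pruned up front
    }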
@@ -8864,10 +9027,9 @@ enum OverloadCandidateKind {
oc_inherited_constructor_template
};
-OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
- NamedDecl *Found,
- FunctionDecl *Fn,
- std::string &Description) {
+static OverloadCandidateKind
+ClassifyOverloadCandidate(Sema &S, NamedDecl *Found, FunctionDecl *Fn,
+ std::string &Description) {
bool isTemplate = false;
if (FunctionTemplateDecl *FunTmpl = Fn->getPrimaryTemplate()) {
@@ -8960,8 +9122,9 @@ static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
return false;
}
- auto I = llvm::find_if(
- FD->parameters(), std::mem_fn(&ParmVarDecl::hasAttr<PassObjectSizeAttr>));
+ auto I = llvm::find_if(FD->parameters(), [](const ParmVarDecl *P) {
+ return P->hasAttr<PassObjectSizeAttr>();
+ });
if (I == FD->param_end())
return true;
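The code above (now using a plain lambda rather than std::mem_fn) rejects taking the address of a function that has a pass_object_size parameter. Illustrative sketch:

    void fill(char *buf __attribute__((pass_object_size(0))));
    // void (*p)(char *) = fill;  // error: 'fill' is unavailable as an
    //                            // address-of candidate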
@@ -9003,7 +9166,7 @@ void Sema::NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
std::string FnDesc;
OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Found, Fn, FnDesc);
PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
- << (unsigned) K << FnDesc;
+ << (unsigned) K << Fn << FnDesc;
HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
Diag(Fn->getLocation(), PD);
@@ -9436,9 +9599,25 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
int which = 0;
if (isa<TemplateTypeParmDecl>(ParamD))
which = 0;
- else if (isa<NonTypeTemplateParmDecl>(ParamD))
+ else if (isa<NonTypeTemplateParmDecl>(ParamD)) {
+ // Deduction might have failed because we deduced arguments of two
+ // different types for a non-type template parameter.
+ // FIXME: Use a different TDK value for this.
+ QualType T1 =
+ DeductionFailure.getFirstArg()->getNonTypeTemplateArgumentType();
+ QualType T2 =
+ DeductionFailure.getSecondArg()->getNonTypeTemplateArgumentType();
+ if (!S.Context.hasSameType(T1, T2)) {
+ S.Diag(Templated->getLocation(),
+ diag::note_ovl_candidate_inconsistent_deduction_types)
+ << ParamD->getDeclName() << *DeductionFailure.getFirstArg() << T1
+ << *DeductionFailure.getSecondArg() << T2;
+ MaybeEmitInheritedConstructorNote(S, Found);
+ return;
+ }
+
which = 1;
- else {
+ } else {
which = 2;
}
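A sketch of the case the new note targets, leaning on the C++1z 'auto' non-type template parameters added later in this patch (names illustrative):

    template <auto A, auto B> struct P {};
    template <auto V> void f(P<V, V>);
    void g() { f(P<1, 1L>{}); }  // V deduced as 1 (int) and as 1L (long):
                                 // conflicting deduced types for one parameter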
@@ -9592,6 +9771,10 @@ static void DiagnoseBadDeduction(Sema &S, NamedDecl *Found, Decl *Templated,
S.Diag(Templated->getLocation(), diag::note_ovl_candidate_bad_deduction);
MaybeEmitInheritedConstructorNote(S, Found);
return;
+ case Sema::TDK_CUDATargetMismatch:
+ S.Diag(Templated->getLocation(),
+ diag::note_cuda_ovl_candidate_target_mismatch);
+ return;
}
}
@@ -9673,6 +9856,13 @@ static void DiagnoseFailedEnableIfAttr(Sema &S, OverloadCandidate *Cand) {
<< Attr->getCond()->getSourceRange() << Attr->getMessage();
}
+static void DiagnoseOpenCLExtensionDisabled(Sema &S, OverloadCandidate *Cand) {
+ FunctionDecl *Callee = Cand->Function;
+
+ S.Diag(Callee->getLocation(),
+ diag::note_ovl_candidate_disabled_by_extension);
+}
+
/// Generates a 'note' diagnostic for an overload candidate. We've
/// already generated a primary error at the call site.
///
@@ -9750,6 +9940,9 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
case ovl_fail_enable_if:
return DiagnoseFailedEnableIfAttr(S, Cand);
+ case ovl_fail_ext_disabled:
+ return DiagnoseOpenCLExtensionDisabled(S, Cand);
+
case ovl_fail_addr_not_available: {
bool Available = checkAddressOfCandidateIsAvailable(S, Cand->Function);
(void)Available;
@@ -9848,6 +10041,7 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
case Sema::TDK_DeducedMismatch:
case Sema::TDK_NonDeducedMismatch:
case Sema::TDK_MiscellaneousDeductionFailure:
+ case Sema::TDK_CUDATargetMismatch:
return 3;
case Sema::TDK_InstantiationDepth:
@@ -10074,16 +10268,17 @@ static void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
/// PrintOverloadCandidates - When overload resolution fails, prints
/// diagnostic messages containing the candidates in the candidate
/// set.
-void OverloadCandidateSet::NoteCandidates(Sema &S,
- OverloadCandidateDisplayKind OCD,
- ArrayRef<Expr *> Args,
- StringRef Opc,
- SourceLocation OpLoc) {
+void OverloadCandidateSet::NoteCandidates(
+ Sema &S, OverloadCandidateDisplayKind OCD, ArrayRef<Expr *> Args,
+ StringRef Opc, SourceLocation OpLoc,
+ llvm::function_ref<bool(OverloadCandidate &)> Filter) {
// Sort the candidates by viability and position. Sorting directly would
// be prohibitive, so we make a set of pointers and sort those.
SmallVector<OverloadCandidate*, 32> Cands;
if (OCD == OCD_AllCandidates) Cands.reserve(size());
for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
+ if (!Filter(*Cand))
+ continue;
if (Cand->Viable)
Cands.push_back(Cand);
else if (OCD == OCD_AllCandidates) {
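NoteCandidates now takes an llvm::function_ref filter so a caller can restrict which candidates receive notes. A hypothetical call-site sketch (the predicate is illustrative):

    CandidateSet.NoteCandidates(SemaRef, OCD_AllCandidates, Args, "", OpLoc,
                                [](OverloadCandidate &C) {
                                  return C.Function != nullptr;  // note only real functions
                                });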
@@ -10269,6 +10464,21 @@ QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
return Ret;
}
+static bool completeFunctionType(Sema &S, FunctionDecl *FD, SourceLocation Loc,
+ bool Complain = true) {
+ if (S.getLangOpts().CPlusPlus14 && FD->getReturnType()->isUndeducedType() &&
+ S.DeduceReturnType(FD, Loc, Complain))
+ return true;
+
+ auto *FPT = FD->getType()->castAs<FunctionProtoType>();
+ if (S.getLangOpts().CPlusPlus1z &&
+ isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
+ !S.ResolveExceptionSpec(Loc, FPT))
+ return true;
+
+ return false;
+}
+
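completeFunctionType gathers the two "finish this function's type" steps in one place: C++14 return type deduction and C++1z resolution of a not-yet-computed exception specification (noexcept being part of the function type in C++1z). For example, taking the address of an overload can force the former:

    auto h() { return 42; }   // C++14 deduced return type
    auto h(int) { return 0; }
    int (*p)() = h;           // choosing h() first completes its type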
namespace {
// A helper class to help with address of function resolution
// - allows us to avoid passing around all those ugly parameters
@@ -10359,7 +10569,7 @@ private:
bool candidateHasExactlyCorrectType(const FunctionDecl *FD) {
QualType Discard;
return Context.hasSameUnqualifiedType(TargetFunctionType, FD->getType()) ||
- S.IsNoReturnConversion(FD->getType(), TargetFunctionType, Discard);
+ S.IsFunctionConversion(FD->getType(), TargetFunctionType, Discard);
}
/// \return true if A is considered a better overload candidate for the
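Switching to IsFunctionConversion keeps the old noreturn case and also admits the C++1z noexcept function pointer conversion, so a target type may now match modulo noexcept:

    void cb() noexcept;
    void cb(int);
    void (*p)() = cb;  // selects cb(): dropping noexcept is a valid
                       // function pointer conversion in C++1z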
@@ -10436,7 +10646,7 @@ private:
= S.DeduceTemplateArguments(FunctionTemplate,
&OvlExplicitTemplateArgs,
TargetFunctionType, Specialization,
- Info, /*InOverloadResolution=*/true)) {
+ Info, /*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
FailedCandidates.addCandidate()
.set(CurAccessFunPair, FunctionTemplate->getTemplatedDecl(),
@@ -10472,14 +10682,13 @@ private:
if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
if (S.getLangOpts().CUDA)
if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
- if (!Caller->isImplicit() && S.CheckCUDATarget(Caller, FunDecl))
+ if (!Caller->isImplicit() && !S.IsAllowedCUDACall(Caller, FunDecl))
return false;
// If any candidate has a placeholder return type, trigger its deduction
// now.
- if (S.getLangOpts().CPlusPlus14 &&
- FunDecl->getReturnType()->isUndeducedType() &&
- S.DeduceReturnType(FunDecl, SourceExpr->getLocStart(), Complain)) {
+ if (completeFunctionType(S, FunDecl, SourceExpr->getLocStart(),
+ Complain)) {
HasComplained |= Complain;
return false;
}
@@ -10704,6 +10913,8 @@ Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
else if (NumMatches == 1) {
Fn = Resolver.getMatchingFunctionDecl();
assert(Fn);
+ if (auto *FPT = Fn->getType()->getAs<FunctionProtoType>())
+ ResolveExceptionSpec(AddressOfExpr->getExprLoc(), FPT);
FoundResult = *Resolver.getMatchingFunctionAccessPair();
if (Complain) {
if (Resolver.IsStaticMemberFunctionFromBoundPointer())
@@ -10838,7 +11049,7 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
if (TemplateDeductionResult Result
= DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
Specialization, Info,
- /*InOverloadResolution=*/true)) {
+ /*IsAddressOfFunction*/true)) {
// Make a note of the failed deduction for diagnostics.
// TODO: Actually use the failed-deduction info?
FailedCandidates.addCandidate()
@@ -10863,9 +11074,8 @@ Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
if (FoundResult) *FoundResult = I.getPair();
}
- if (Matched && getLangOpts().CPlusPlus14 &&
- Matched->getReturnType()->isUndeducedType() &&
- DeduceReturnType(Matched, ovl->getExprLoc(), Complain))
+ if (Matched &&
+ completeFunctionType(*this, Matched, ovl->getExprLoc(), Complain))
return nullptr;
return Matched;
@@ -11255,6 +11465,12 @@ BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
assert(!R.empty() && "lookup results empty despite recovery");
+ // If recovery created an ambiguity, just bail out.
+ if (R.isAmbiguous()) {
+ R.suppressDiagnostics();
+ return ExprError();
+ }
+
// Build an implicit member call if appropriate. Just drop the
// casts and such from the call, we don't really care.
ExprResult NewFn = ExprError();
@@ -12331,18 +12547,6 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
new (Context) CXXMemberCallExpr(Context, MemExprE, Args,
ResultType, VK, RParenLoc);
- // (CUDA B.1): Check for invalid calls between targets.
- if (getLangOpts().CUDA) {
- if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext)) {
- if (CheckCUDATarget(Caller, Method)) {
- Diag(MemExpr->getMemberLoc(), diag::err_ref_bad_target)
- << IdentifyCUDATarget(Method) << Method->getIdentifier()
- << IdentifyCUDATarget(Caller);
- return ExprError();
- }
- }
- }
-
// Check for a valid return type.
if (CheckCallReturnType(Method->getReturnType(), MemExpr->getMemberLoc(),
TheCall, Method))
@@ -12374,10 +12578,10 @@ Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
// In the case the method to call was not selected by the overloading
// resolution process, we still need to handle the enable_if attribute. Do
- // that here, so it will not hide previous -- and more relevant -- errors
- if (isa<MemberExpr>(NakedMemExpr)) {
+ // that here, so it will not hide previous -- and more relevant -- errors.
+ if (auto *MemE = dyn_cast<MemberExpr>(NakedMemExpr)) {
if (const EnableIfAttr *Attr = CheckEnableIf(Method, Args, true)) {
- Diag(MemExprE->getLocStart(),
+ Diag(MemE->getMemberLoc(),
diag::err_ovl_no_viable_member_function_in_call)
<< Method << Method->getSourceRange();
Diag(Method->getLocation(),
@@ -12619,9 +12823,9 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// Build the full argument list for the method call (the implicit object
// parameter is placed at the beginning of the list).
- std::unique_ptr<Expr * []> MethodArgs(new Expr *[Args.size() + 1]);
+ SmallVector<Expr *, 8> MethodArgs(Args.size() + 1);
MethodArgs[0] = Object.get();
- std::copy(Args.begin(), Args.end(), &MethodArgs[1]);
+ std::copy(Args.begin(), Args.end(), MethodArgs.begin() + 1);
// Once we've built TheCall, all of the expressions are properly
// owned.
@@ -12630,10 +12834,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall = new (Context)
- CXXOperatorCallExpr(Context, OO_Call, NewFn.get(),
- llvm::makeArrayRef(MethodArgs.get(), Args.size() + 1),
- ResultTy, VK, RParenLoc, false);
- MethodArgs.reset();
+ CXXOperatorCallExpr(Context, OO_Call, NewFn.get(), MethodArgs, ResultTy,
+ VK, RParenLoc, false);
if (CheckCallReturnType(Method->getReturnType(), LParenLoc, TheCall, Method))
return true;
@@ -12996,6 +13198,31 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
ICE->getValueKind());
}
+ if (auto *GSE = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!GSE->isResultDependent()) {
+ Expr *SubExpr =
+ FixOverloadedFunctionReference(GSE->getResultExpr(), Found, Fn);
+ if (SubExpr == GSE->getResultExpr())
+ return GSE;
+
+ // Replace the resulting type information before rebuilding the generic
+ // selection expression.
+ ArrayRef<Expr *> A = GSE->getAssocExprs();
+ SmallVector<Expr *, 4> AssocExprs(A.begin(), A.end());
+ unsigned ResultIdx = GSE->getResultIndex();
+ AssocExprs[ResultIdx] = SubExpr;
+
+ return new (Context) GenericSelectionExpr(
+ Context, GSE->getGenericLoc(), GSE->getControllingExpr(),
+ GSE->getAssocTypeSourceInfos(), AssocExprs, GSE->getDefaultLoc(),
+ GSE->getRParenLoc(), GSE->containsUnexpandedParameterPack(),
+ ResultIdx);
+ }
+ // Rather than fall through to the unreachable, return the original generic
+ // selection expression.
+ return GSE;
+ }
+
if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) {
assert(UnOp->getOpcode() == UO_AddrOf &&
"Can only take the address of an overloaded function");
@@ -13044,6 +13271,13 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
UnOp->getOperatorLoc());
}
+ // C++ [except.spec]p17:
+ // An exception-specification is considered to be needed when:
+ // - in an expression the function is the unique lookup result or the
+ // selected member of a set of overloaded functions
+ if (auto *FPT = Fn->getType()->getAs<FunctionProtoType>())
+ ResolveExceptionSpec(E->getExprLoc(), FPT);
+
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
// FIXME: avoid copy.
TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = nullptr;
diff --git a/lib/Sema/SemaPseudoObject.cpp b/lib/Sema/SemaPseudoObject.cpp
index c93d800f96d1..8e53fda846f4 100644
--- a/lib/Sema/SemaPseudoObject.cpp
+++ b/lib/Sema/SemaPseudoObject.cpp
@@ -661,7 +661,7 @@ bool ObjCPropertyOpBuilder::findSetter(bool warn) {
if (ObjCPropertyDecl *prop1 = IFace->FindPropertyDeclaration(
AltMember, prop->getQueryKind()))
if (prop != prop1 && (prop1->getSetterMethodDecl() == setter)) {
- S.Diag(RefExpr->getExprLoc(), diag::error_property_setter_ambiguous_use)
+ S.Diag(RefExpr->getExprLoc(), diag::err_property_setter_ambiguous_use)
<< prop << prop1 << setter->getSelector();
S.Diag(prop->getLocation(), diag::note_property_declare);
S.Diag(prop1->getLocation(), diag::note_property_declare);
@@ -770,7 +770,8 @@ ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
ExprResult opResult = op;
Sema::AssignConvertType assignResult
= S.CheckSingleAssignmentConstraints(paramType, opResult);
- if (S.DiagnoseAssignmentResult(assignResult, opcLoc, paramType,
+ if (opResult.isInvalid() ||
+ S.DiagnoseAssignmentResult(assignResult, opcLoc, paramType,
op->getType(), opResult.get(),
Sema::AA_Assigning))
return ExprError();
@@ -1103,8 +1104,9 @@ Sema::ObjCSubscriptKind
Diag(FromE->getExprLoc(), diag::err_objc_multiple_subscript_type_conversion)
<< FromE->getType();
for (unsigned int i = 0; i < ConversionDecls.size(); i++)
- Diag(ConversionDecls[i]->getLocation(), diag::not_conv_function_declared_at);
-
+ Diag(ConversionDecls[i]->getLocation(),
+ diag::note_conv_function_declared_at);
+
return OS_Error;
}
@@ -1479,7 +1481,7 @@ ExprResult MSPropertyOpBuilder::buildGet() {
SourceLocation(), GetterName, nullptr);
if (GetterExpr.isInvalid()) {
S.Diag(RefExpr->getMemberLoc(),
- diag::error_cannot_find_suitable_accessor) << 0 /* getter */
+ diag::err_cannot_find_suitable_accessor) << 0 /* getter */
<< RefExpr->getPropertyDecl();
return ExprError();
}
@@ -1508,7 +1510,7 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
SourceLocation(), SetterName, nullptr);
if (SetterExpr.isInvalid()) {
S.Diag(RefExpr->getMemberLoc(),
- diag::error_cannot_find_suitable_accessor) << 1 /* setter */
+ diag::err_cannot_find_suitable_accessor) << 1 /* setter */
<< RefExpr->getPropertyDecl();
return ExprError();
}
diff --git a/lib/Sema/SemaStmt.cpp b/lib/Sema/SemaStmt.cpp
index 8e8104e581b2..50f0a22ff02b 100644
--- a/lib/Sema/SemaStmt.cpp
+++ b/lib/Sema/SemaStmt.cpp
@@ -536,7 +536,7 @@ StmtResult Sema::BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
if (Cond.isInvalid())
return StmtError();
- if (IsConstexpr)
+ if (IsConstexpr || isa<ObjCAvailabilityCheckExpr>(Cond.get().second))
getCurFunction()->setHasBranchProtectedScope();
DiagnoseUnusedExprResult(thenStmt);
@@ -1070,7 +1070,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>();
// If switch has default case, then ignore it.
- if (!CaseListIsErroneous && !HasConstantCond && ET) {
+ if (!CaseListIsErroneous && !HasConstantCond && ET &&
+ ET->getDecl()->isCompleteDefinition()) {
const EnumDecl *ED = ET->getDecl();
EnumValsTy EnumVals;
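The isCompleteDefinition() guard matters for opaque enum declarations, which are complete types with no visible enumerator list, so there is nothing to check case coverage against. Sketch:

    enum E : int;  // opaque declaration: complete, but not a definition
    void f(E e) {
      switch (e) {             // enumerator-coverage checks now skipped
      case static_cast<E>(0):
        break;
      }
    }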
@@ -3193,6 +3194,10 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (FD->isNoReturn())
Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
<< FD->getDeclName();
+ if (FD->isMain() && RetValExp)
+ if (isa<CXXBoolLiteralExpr>(RetValExp))
+ Diag(ReturnLoc, diag::warn_main_returns_bool_literal)
+ << RetValExp->getSourceRange();
} else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
FnRetType = MD->getReturnType();
isObjCMethod = true;
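A one-line reproducer for the new warning:

    int main() { return true; }  // warning: bool literal returned from 'main'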
@@ -3447,7 +3452,7 @@ StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw) {
!ThrowType->isObjCObjectPointerType()) {
const PointerType *PT = ThrowType->getAs<PointerType>();
if (!PT || !PT->getPointeeType()->isVoidType())
- return StmtError(Diag(AtLoc, diag::error_objc_throw_expects_object)
+ return StmtError(Diag(AtLoc, diag::err_objc_throw_expects_object)
<< Throw->getType() << Throw->getSourceRange());
}
}
@@ -3468,7 +3473,7 @@ Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
while (AtCatchParent && !AtCatchParent->isAtCatchScope())
AtCatchParent = AtCatchParent->getParent();
if (!AtCatchParent)
- return StmtError(Diag(AtLoc, diag::error_rethrow_used_outside_catch));
+ return StmtError(Diag(AtLoc, diag::err_rethrow_used_outside_catch));
}
return BuildObjCAtThrowStmt(AtLoc, Throw);
}
@@ -3489,17 +3494,19 @@ Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) {
if (getLangOpts().CPlusPlus) {
if (RequireCompleteType(atLoc, type,
diag::err_incomplete_receiver_type))
- return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ return Diag(atLoc, diag::err_objc_synchronized_expects_object)
<< type << operand->getSourceRange();
ExprResult result = PerformContextuallyConvertToObjCPointer(operand);
+ if (result.isInvalid())
+ return ExprError();
if (!result.isUsable())
- return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ return Diag(atLoc, diag::err_objc_synchronized_expects_object)
<< type << operand->getSourceRange();
operand = result.get();
} else {
- return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ return Diag(atLoc, diag::err_objc_synchronized_expects_object)
<< type << operand->getSourceRange();
}
}
@@ -3644,6 +3651,11 @@ StmtResult Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
!getSourceManager().isInSystemHeader(TryLoc))
Diag(TryLoc, diag::err_exceptions_disabled) << "try";
+ // Exceptions aren't allowed in CUDA device code.
+ if (getLangOpts().CUDA)
+ CUDADiagIfDeviceCode(TryLoc, diag::err_cuda_device_exceptions)
+ << "try" << CurrentCUDATarget();
+
if (getCurScope() && getCurScope()->isOpenMPSimdDirectiveScope())
Diag(TryLoc, diag::err_omp_simd_region_cannot_use_stmt) << "try";
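Sketch of what the new CUDA check rejects (host-side code is unaffected):

    __device__ void d() {
      try {        // error: cannot use 'try' in __device__ function
      } catch (...) {
      }
    }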
diff --git a/lib/Sema/SemaStmtAsm.cpp b/lib/Sema/SemaStmtAsm.cpp
index cd4269cd7eae..76de9e299399 100644
--- a/lib/Sema/SemaStmtAsm.cpp
+++ b/lib/Sema/SemaStmtAsm.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/TypeLoc.h"
@@ -21,8 +20,9 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/StringSet.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
using namespace clang;
using namespace sema;
@@ -138,6 +138,56 @@ static bool checkExprMemoryConstraintCompat(Sema &S, Expr *E,
return false;
}
+// Extract the register name from the expression; if there is no register
+// name to extract, return ""
+static StringRef extractRegisterName(const Expr *Expression,
+ const TargetInfo &Target) {
+ Expression = Expression->IgnoreImpCasts();
+ if (const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(Expression)) {
+ // Handle cases where the expression is a variable
+ const VarDecl *Variable = dyn_cast<VarDecl>(AsmDeclRef->getDecl());
+ if (Variable && Variable->getStorageClass() == SC_Register) {
+ if (AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>())
+ if (Target.isValidGCCRegisterName(Attr->getLabel()))
+ return Target.getNormalizedGCCRegisterName(Attr->getLabel(), true);
+ }
+ }
+ return "";
+}
+
+// Checks whether the input and output lists conflict with the clobber list.
+// If there is a conflict, returns the location of the conflicting clobber;
+// otherwise returns an invalid SourceLocation
+static SourceLocation
+getClobberConflictLocation(MultiExprArg Exprs, StringLiteral **Constraints,
+ StringLiteral **Clobbers, int NumClobbers,
+ const TargetInfo &Target, ASTContext &Cont) {
+ llvm::StringSet<> InOutVars;
+ // Collect all the input and output registers from the extended asm
+ // statement in order to check for conflicts with the clobber list
+ for (unsigned int i = 0; i < Exprs.size(); ++i) {
+ StringRef Constraint = Constraints[i]->getString();
+ StringRef InOutReg = Target.getConstraintRegister(
+ Constraint, extractRegisterName(Exprs[i], Target));
+ if (InOutReg != "")
+ InOutVars.insert(InOutReg);
+ }
+ // Check for each item in the clobber list if it conflicts with the input
+ // or output
+ for (int i = 0; i < NumClobbers; ++i) {
+ StringRef Clobber = Clobbers[i]->getString();
+ // We only check registers; therefore we don't check the cc and memory
+ // clobbers
+ if (Clobber == "cc" || Clobber == "memory")
+ continue;
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber, true);
+ // Go over the input and output registers we collected
+ if (InOutVars.count(Clobber))
+ return Clobbers[i]->getLocStart();
+ }
+ return SourceLocation();
+}
+
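These two helpers feed a new check in ActOnGCCAsmStmt (wired up below): if a register bound to an input or output operand also appears in the clobber list, the statement is rejected. A minimal x86 sketch using GNU register-asm variables:

    void f() {
      register int x asm("eax") = 0;
      // "eax" backs the output operand and is also clobbered: now an error.
      asm volatile("" : "+r"(x) : : "eax");
    }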
StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
@@ -544,6 +594,13 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
return StmtError();
}
+ // Check for conflicts between clobber list and input or output lists
+ SourceLocation ConstraintLoc =
+ getClobberConflictLocation(Exprs, Constraints, Clobbers, NumClobbers,
+ Context.getTargetInfo(), Context);
+ if (ConstraintLoc.isValid())
+ return Diag(ConstraintLoc, diag::error_inoutput_conflict_with_clobber);
+
return NS;
}
@@ -751,17 +808,17 @@ LabelDecl *Sema::GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
// Otherwise, insert it, but only resolve it if we have seen the label itself.
std::string InternalName;
llvm::raw_string_ostream OS(InternalName);
- // Create an internal name for the label. The name should not be a valid mangled
- // name, and should be unique. We use a dot to make the name an invalid mangled
- // name.
- OS << "__MSASMLABEL_." << MSAsmLabelNameCounter++ << "__";
- for (auto it = ExternalLabelName.begin(); it != ExternalLabelName.end();
- ++it) {
- OS << *it;
- if (*it == '$') {
- // We escape '$' in asm strings by replacing it with "$$"
+ // Create an internal name for the label. The name should not be a valid
+ // mangled name, and should be unique. We use a dot to make the name an
+ // invalid mangled name. We use LLVM's inline asm ${:uid} escape so that a
+ // unique label is generated each time this blob is emitted, even after
+ // inlining or LTO.
+ OS << "__MSASMLABEL_.${:uid}__";
+ for (char C : ExternalLabelName) {
+ OS << C;
+ // We escape '$' in asm strings by replacing it with "$$"
+ if (C == '$')
OS << '$';
- }
}
Label->setMSAsmLabel(OS.str());
}
diff --git a/lib/Sema/SemaStmtAttr.cpp b/lib/Sema/SemaStmtAttr.cpp
index 87fd88939572..01fa856132d7 100644
--- a/lib/Sema/SemaStmtAttr.cpp
+++ b/lib/Sema/SemaStmtAttr.cpp
@@ -225,16 +225,12 @@ CheckForIncompatibleAttributes(Sema &S,
static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const AttributeList &A,
SourceRange Range) {
- // OpenCL v2.0 s6.11.5 - opencl_unroll_hint can have 0 arguments (compiler
+ // Although the feature was only introduced in OpenCL C v2.0 (s6.11.5), it
+ // is useful for OpenCL 1.x too and doesn't require hardware support.
+ // opencl_unroll_hint can have 0 arguments (compiler
// determines unrolling factor) or 1 argument (the unroll factor provided
// by the user).
- if (S.getLangOpts().OpenCLVersion < 200) {
- S.Diag(A.getLoc(), diag::err_attribute_requires_opencl_version)
- << A.getName() << "2.0" << 1;
- return nullptr;
- }
-
unsigned NumArgs = A.getNumArgs();
if (NumArgs > 1) {
diff --git a/lib/Sema/SemaTemplate.cpp b/lib/Sema/SemaTemplate.cpp
index 72e499342f8f..facc5d1b375b 100644
--- a/lib/Sema/SemaTemplate.cpp
+++ b/lib/Sema/SemaTemplate.cpp
@@ -88,14 +88,14 @@ static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
return nullptr;
}
-void Sema::FilterAcceptableTemplateNames(LookupResult &R,
+void Sema::FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates) {
// The set of class templates we've already seen.
llvm::SmallPtrSet<ClassTemplateDecl *, 8> ClassTemplates;
LookupResult::Filter filter = R.makeFilter();
while (filter.hasNext()) {
NamedDecl *Orig = filter.next();
- NamedDecl *Repl = isAcceptableTemplateName(Context, Orig,
+ NamedDecl *Repl = isAcceptableTemplateName(Context, Orig,
AllowFunctionTemplates);
if (!Repl)
filter.erase();
@@ -131,7 +131,7 @@ bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R,
for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I)
if (isAcceptableTemplateName(Context, *I, AllowFunctionTemplates))
return true;
-
+
return false;
}
@@ -265,7 +265,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
assert((isDependent || !ObjectType->isIncompleteType() ||
ObjectType->castAs<TagType>()->isBeingDefined()) &&
"Caller should have completed object type");
-
+
// Template names cannot appear inside an Objective-C class or object type.
if (ObjectType->isObjCObjectOrInterfaceType()) {
Found.clear();
@@ -312,7 +312,7 @@ void Sema::LookupTemplateName(LookupResult &Found,
} else {
// Perform unqualified name lookup in the current scope.
LookupName(Found, S);
-
+
if (!ObjectType.isNull())
AllowFunctionTemplatesInLookup = false;
}
@@ -429,7 +429,12 @@ Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
bool MightBeCxx11UnevalField =
getLangOpts().CPlusPlus11 && isUnevaluatedContext();
- if (!MightBeCxx11UnevalField && !isAddressOfOperand &&
+ // Check if the nested name specifier is an enum type.
+ bool IsEnum = false;
+ if (NestedNameSpecifier *NNS = SS.getScopeRep())
+ IsEnum = dyn_cast_or_null<EnumType>(NNS->getAsType());
+
+ if (!MightBeCxx11UnevalField && !isAddressOfOperand && !IsEnum &&
isa<CXXMethodDecl>(DC) && cast<CXXMethodDecl>(DC)->isInstance()) {
QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context);
@@ -456,6 +461,104 @@ Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
TemplateArgs);
}
+
+/// Determine whether we would be unable to instantiate this template (because
+/// it either has no definition, or is in the process of being instantiated).
+bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
+ NamedDecl *Instantiation,
+ bool InstantiatedFromMember,
+ const NamedDecl *Pattern,
+ const NamedDecl *PatternDef,
+ TemplateSpecializationKind TSK,
+ bool Complain /*= true*/) {
+ assert(isa<TagDecl>(Instantiation) || isa<FunctionDecl>(Instantiation) ||
+ isa<VarDecl>(Instantiation));
+
+ bool IsEntityBeingDefined = false;
+ if (const TagDecl *TD = dyn_cast_or_null<TagDecl>(PatternDef))
+ IsEntityBeingDefined = TD->isBeingDefined();
+
+ if (PatternDef && !IsEntityBeingDefined) {
+ NamedDecl *SuggestedDef = nullptr;
+ if (!hasVisibleDefinition(const_cast<NamedDecl*>(PatternDef), &SuggestedDef,
+ /*OnlyNeedComplete*/false)) {
+ // If we're allowed to diagnose this and recover, do so.
+ bool Recover = Complain && !isSFINAEContext();
+ if (Complain)
+ diagnoseMissingImport(PointOfInstantiation, SuggestedDef,
+ Sema::MissingImportKind::Definition, Recover);
+ return !Recover;
+ }
+ return false;
+ }
+
+ if (!Complain || (PatternDef && PatternDef->isInvalidDecl()))
+ return true;
+
+ llvm::Optional<unsigned> Note;
+ QualType InstantiationTy;
+ if (TagDecl *TD = dyn_cast<TagDecl>(Instantiation))
+ InstantiationTy = Context.getTypeDeclType(TD);
+ if (PatternDef) {
+ Diag(PointOfInstantiation,
+ diag::err_template_instantiate_within_definition)
+ << /*implicit|explicit*/(TSK != TSK_ImplicitInstantiation)
+ << InstantiationTy;
+ // Not much point in noting the template declaration here, since
+ // we're lexically inside it.
+ Instantiation->setInvalidDecl();
+ } else if (InstantiatedFromMember) {
+ if (isa<FunctionDecl>(Instantiation)) {
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_member)
+ << /*member function*/ 1 << Instantiation->getDeclName()
+ << Instantiation->getDeclContext();
+ Note = diag::note_explicit_instantiation_here;
+ } else {
+ assert(isa<TagDecl>(Instantiation) && "Must be a TagDecl!");
+ Diag(PointOfInstantiation,
+ diag::err_implicit_instantiate_member_undefined)
+ << InstantiationTy;
+ Note = diag::note_member_declared_at;
+ }
+ } else {
+ if (isa<FunctionDecl>(Instantiation)) {
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_func_template)
+ << Pattern;
+ Note = diag::note_explicit_instantiation_here;
+ } else if (isa<TagDecl>(Instantiation)) {
+ Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
+ << (TSK != TSK_ImplicitInstantiation)
+ << InstantiationTy;
+ Note = diag::note_template_decl_here;
+ } else {
+ assert(isa<VarDecl>(Instantiation) && "Must be a VarDecl!");
+ if (isa<VarTemplateSpecializationDecl>(Instantiation)) {
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_var_template)
+ << Instantiation;
+ Instantiation->setInvalidDecl();
+ } else
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_member)
+ << /*static data member*/ 2 << Instantiation->getDeclName()
+ << Instantiation->getDeclContext();
+ Note = diag::note_explicit_instantiation_here;
+ }
+ }
+ if (Note) // Diagnostics were emitted.
+ Diag(Pattern->getLocation(), Note.getValue());
+
+ // In general, Instantiation isn't marked invalid, so that each of several
+ // undefined instantiations gets its own error. But the code that converts
+ // an explicit declaration into an explicit definition can't handle
+ // invalid declarations, so mark Instantiation invalid in that case.
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ Instantiation->setInvalidDecl();
+ return true;
+}
+
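DiagnoseUninstantiableTemplate centralizes the "cannot instantiate this" diagnostics so template checking and the instantiation machinery share one implementation. The classic trigger:

    template <typename T> struct X;  // declared, never defined
    X<int> x;  // error: implicit instantiation of undefined template 'X<int>'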
/// DiagnoseTemplateParameterShadow - Produce a diagnostic complaining
/// that the template parameter 'PrevDecl' is being shadowed by a new
/// declaration at location Loc. Returns true to indicate that this is
@@ -626,8 +729,22 @@ Decl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
///
/// \returns the (possibly-promoted) parameter type if valid;
/// otherwise, produces a diagnostic and returns a NULL type.
-QualType
-Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
+QualType Sema::CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
+ SourceLocation Loc) {
+ if (TSI->getType()->isUndeducedType()) {
+ // C++1z [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains
+ // - an identifier associated by name lookup with a non-type
+ // template-parameter declared with a type that contains a
+ // placeholder type (7.1.7.4),
+ TSI = SubstAutoTypeSourceInfo(TSI, Context.DependentTy);
+ }
+
+ return CheckNonTypeTemplateParameterType(TSI->getType(), Loc);
+}
+
+QualType Sema::CheckNonTypeTemplateParameterType(QualType T,
+ SourceLocation Loc) {
// We don't allow variably-modified types as the type of non-type template
// parameters.
if (T->isVariablyModifiedType()) {
@@ -653,7 +770,9 @@ Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
T->isNullPtrType() ||
// If T is a dependent type, we can't do the check now, so we
// assume that it is well-formed.
- T->isDependentType()) {
+ T->isDependentType() ||
+ // Allow use of auto in template parameter declarations.
+ T->isUndeducedType()) {
// C++ [temp.param]p5: The top-level cv-qualifiers on the template-parameter
// are ignored when determining its type.
return T.getUnqualifiedType();
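Together these changes allow a placeholder type in a non-type template parameter: the parameter's type stays dependent until an argument is supplied, then is deduced from it. Sketch (C++1z):

    template <auto V> struct C {};
    C<42> a;    // V has type int
    C<'x'> b;   // V has type char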
@@ -679,13 +798,18 @@ Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
SourceLocation EqualLoc,
Expr *Default) {
TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
- QualType T = TInfo->getType();
+
+ if (TInfo->getType()->isUndeducedType()) {
+ Diag(D.getIdentifierLoc(),
+ diag::warn_cxx14_compat_template_nontype_parm_auto_type)
+ << QualType(TInfo->getType()->getContainedAutoType(), 0);
+ }
assert(S->isTemplateParamScope() &&
"Non-type template parameter not in template parameter scope!");
bool Invalid = false;
- T = CheckNonTypeTemplateParameterType(T, D.getIdentifierLoc());
+ QualType T = CheckNonTypeTemplateParameterType(TInfo, D.getIdentifierLoc());
if (T.isNull()) {
T = Context.IntTy; // Recover with an 'int' type.
Invalid = true;
@@ -766,7 +890,7 @@ Decl *Sema::ActOnTemplateTemplateParameter(Scope* S,
Depth, Position, IsParameterPack,
Name, Params);
Param->setAccess(AS_public);
-
+
// If the template template parameter has a name, then link the identifier
// into the scope and lookup mechanisms.
if (Name) {
@@ -832,11 +956,10 @@ Sema::ActOnTemplateParameterList(unsigned Depth,
if (ExportLoc.isValid())
Diag(ExportLoc, diag::warn_template_export_unsupported);
- // FIXME: store RequiresClause
return TemplateParameterList::Create(
Context, TemplateLoc, LAngleLoc,
llvm::makeArrayRef((NamedDecl *const *)Params.data(), Params.size()),
- RAngleLoc);
+ RAngleLoc, RequiresClause);
}
static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) {
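The removed FIXME is resolved by threading the parsed requires-clause into TemplateParameterList::Create instead of dropping it. Hypothetical Concepts TS surface syntax this preserves:

    template <typename T> requires (sizeof(T) == 4)
    void f(T);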
@@ -897,8 +1020,8 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
if (RequireCompleteDeclContext(SS, SemanticContext))
return true;
- // If we're adding a template to a dependent context, we may need to
- // rebuilding some of the types used within the template parameter list,
+ // If we're adding a template to a dependent context, we may need to
+ // rebuild some of the types used within the template parameter list,
// now that we know what the current instantiation is.
if (SemanticContext->isDependentContext()) {
ContextRAII SavedContext(*this, SemanticContext);
@@ -1124,10 +1247,10 @@ Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
DeclarationName(Name), TemplateParams,
NewClass, PrevClassTemplate);
NewClass->setDescribedClassTemplate(NewTemplate);
-
+
if (ModulePrivateLoc.isValid())
NewTemplate->setModulePrivate();
-
+
// Build the type for the class template declaration now.
QualType T = NewTemplate->getInjectedClassNameSpecialization();
T = Context.getInjectedClassNameType(NewClass, T);
@@ -1218,7 +1341,7 @@ static bool DiagnoseDefaultTemplateArgument(Sema &S,
// A default template-argument shall not be specified in a
// function template declaration or a function template
// definition [...]
- // If a friend function template declaration specifies a default
+ // If a friend function template declaration specifies a default
// template-argument, that declaration shall be a definition and shall be
// the only declaration of the function template in the translation unit.
// (C++98/03 doesn't have this wording; see DR226).
@@ -1530,12 +1653,22 @@ struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> {
typedef RecursiveASTVisitor<DependencyChecker> super;
unsigned Depth;
+
+ // Whether we're looking for a use of a template parameter that makes the
+ // overall construct type-dependent / a dependent type. This is strictly
+ // best-effort for now; we may fail to match at all for a dependent type
+ // in some cases if this is set.
+ bool IgnoreNonTypeDependent;
+
bool Match;
SourceLocation MatchLoc;
- DependencyChecker(unsigned Depth) : Depth(Depth), Match(false) {}
+ DependencyChecker(unsigned Depth, bool IgnoreNonTypeDependent)
+ : Depth(Depth), IgnoreNonTypeDependent(IgnoreNonTypeDependent),
+ Match(false) {}
- DependencyChecker(TemplateParameterList *Params) : Match(false) {
+ DependencyChecker(TemplateParameterList *Params, bool IgnoreNonTypeDependent)
+ : IgnoreNonTypeDependent(IgnoreNonTypeDependent), Match(false) {
NamedDecl *ND = Params->getParam(0);
if (TemplateTypeParmDecl *PD = dyn_cast<TemplateTypeParmDecl>(ND)) {
Depth = PD->getDepth();
@@ -1556,12 +1689,31 @@ struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> {
return false;
}
+ bool TraverseStmt(Stmt *S, DataRecursionQueue *Q = nullptr) {
+ // Prune out non-type-dependent expressions if requested. This can
+ // sometimes result in us failing to find a template parameter reference
+ // (if a value-dependent expression creates a dependent type), but this
+ // mode is best-effort only.
+ if (auto *E = dyn_cast_or_null<Expr>(S))
+ if (IgnoreNonTypeDependent && !E->isTypeDependent())
+ return true;
+ return super::TraverseStmt(S, Q);
+ }
+
+ bool TraverseTypeLoc(TypeLoc TL) {
+ if (IgnoreNonTypeDependent && !TL.isNull() &&
+ !TL.getType()->isDependentType())
+ return true;
+ return super::TraverseTypeLoc(TL);
+ }
+
bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
return !Matches(TL.getTypePtr()->getDepth(), TL.getNameLoc());
}
bool VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
- return !Matches(T->getDepth());
+ // For a best-effort search, keep looking until we find a location.
+ return IgnoreNonTypeDependent || !Matches(T->getDepth());
}
bool TraverseTemplateName(TemplateName N) {
@@ -1599,7 +1751,7 @@ struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> {
/// list.
static bool
DependsOnTemplateParameters(QualType T, TemplateParameterList *Params) {
- DependencyChecker Checker(Params);
+ DependencyChecker Checker(Params, /*IgnoreNonTypeDependent*/false);
Checker.TraverseType(T);
return Checker.Match;
}
@@ -1616,10 +1768,10 @@ static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
return NNSLoc.getTypeLoc().getSourceRange();
} else
break;
-
+
NNSLoc = NNSLoc.getPrefix();
}
-
+
return SourceRange();
}
@@ -1662,34 +1814,34 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
bool &IsExplicitSpecialization, bool &Invalid) {
IsExplicitSpecialization = false;
Invalid = false;
-
+
// The sequence of nested types to which we will match up the template
// parameter lists. We first build this list by starting with the type named
// by the nested-name-specifier and walking out until we run out of types.
SmallVector<QualType, 4> NestedTypes;
QualType T;
if (SS.getScopeRep()) {
- if (CXXRecordDecl *Record
+ if (CXXRecordDecl *Record
= dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
T = Context.getTypeDeclType(Record);
else
T = QualType(SS.getScopeRep()->getAsType(), 0);
}
-
+
// If we found an explicit specialization that prevents us from needing
// 'template<>' headers, this will be set to the location of that
// explicit specialization.
SourceLocation ExplicitSpecLoc;
-
+
while (!T.isNull()) {
NestedTypes.push_back(T);
-
+
// Retrieve the parent of a record type.
if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
// If this type is an explicit specialization, we're done.
if (ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
- if (!isa<ClassTemplatePartialSpecializationDecl>(Spec) &&
+ if (!isa<ClassTemplatePartialSpecializationDecl>(Spec) &&
Spec->getSpecializationKind() == TSK_ExplicitSpecialization) {
ExplicitSpecLoc = Spec->getLocation();
break;
@@ -1699,14 +1851,14 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
ExplicitSpecLoc = Record->getLocation();
break;
}
-
+
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Record->getParent()))
T = Context.getTypeDeclType(Parent);
else
T = QualType();
continue;
- }
-
+ }
+
if (const TemplateSpecializationType *TST
= T->getAs<TemplateSpecializationType>()) {
if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
@@ -1714,10 +1866,10 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
T = Context.getTypeDeclType(Parent);
else
T = QualType();
- continue;
+ continue;
}
}
-
+
// Look one step prior in a dependent template specialization type.
if (const DependentTemplateSpecializationType *DependentTST
= T->getAs<DependentTemplateSpecializationType>()) {
@@ -1727,7 +1879,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
T = QualType();
continue;
}
-
+
// Look one step prior in a dependent name type.
if (const DependentNameType *DependentName = T->getAs<DependentNameType>()){
if (NestedNameSpecifier *NNS = DependentName->getQualifier())
@@ -1736,18 +1888,18 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
T = QualType();
continue;
}
-
+
// Retrieve the parent of an enumeration type.
if (const EnumType *EnumT = T->getAs<EnumType>()) {
// FIXME: Forward-declared enums require a TSK_ExplicitSpecialization
// check here.
EnumDecl *Enum = EnumT->getDecl();
-
+
// Get to the parent type.
if (TypeDecl *Parent = dyn_cast<TypeDecl>(Enum->getParent()))
T = Context.getTypeDeclType(Parent);
else
- T = QualType();
+ T = QualType();
continue;
}
@@ -1799,21 +1951,21 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
for (unsigned TypeIdx = 0, NumTypes = NestedTypes.size(); TypeIdx != NumTypes;
++TypeIdx) {
T = NestedTypes[TypeIdx];
-
+
// Whether we expect a 'template<>' header.
bool NeedEmptyTemplateHeader = false;
// Whether we expect a template header with parameters.
bool NeedNonemptyTemplateHeader = false;
-
+
// For a dependent type, the set of template parameters that we
// expect to see.
TemplateParameterList *ExpectedTemplateParams = nullptr;
// C++0x [temp.expl.spec]p15:
- // A member or a member template may be nested within many enclosing
- // class templates. In an explicit specialization for such a member, the
- // member declaration shall be preceded by a template<> for each
+ // A member or a member template may be nested within many enclosing
+ // class templates. In an explicit specialization for such a member, the
+ // member declaration shall be preceded by a template<> for each
// enclosing class template that is explicitly specialized.
if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
if (ClassTemplatePartialSpecializationDecl *Partial
@@ -1830,38 +1982,38 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
= dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
// C++0x [temp.expl.spec]p4:
// Members of an explicitly specialized class template are defined
- // in the same manner as members of normal classes, and not using
- // the template<> syntax.
+ // in the same manner as members of normal classes, and not using
+ // the template<> syntax.
if (Spec->getSpecializationKind() != TSK_ExplicitSpecialization)
NeedEmptyTemplateHeader = true;
else
continue;
} else if (Record->getTemplateSpecializationKind()) {
- if (Record->getTemplateSpecializationKind()
+ if (Record->getTemplateSpecializationKind()
!= TSK_ExplicitSpecialization &&
TypeIdx == NumTypes - 1)
IsExplicitSpecialization = true;
-
+
continue;
}
} else if (const TemplateSpecializationType *TST
= T->getAs<TemplateSpecializationType>()) {
if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
ExpectedTemplateParams = Template->getTemplateParameters();
- NeedNonemptyTemplateHeader = true;
+ NeedNonemptyTemplateHeader = true;
}
} else if (T->getAs<DependentTemplateSpecializationType>()) {
// FIXME: We actually could/should check the template arguments here
// against the corresponding template parameter list.
NeedNonemptyTemplateHeader = false;
- }
-
+ }
+
// C++ [temp.expl.spec]p16:
- // In an explicit specialization declaration for a member of a class
- // template or a member template that ap- pears in namespace scope, the
- // member template and some of its enclosing class templates may remain
- // unspecialized, except that the declaration shall not explicitly
- // specialize a class member template if its en- closing class templates
+ // In an explicit specialization declaration for a member of a class
+ // template or a member template that appears in namespace scope, the
+ // member template and some of its enclosing class templates may remain
+ // unspecialized, except that the declaration shall not explicitly
+ // specialize a class member template if its enclosing class templates
// are not explicitly specialized as well.
if (ParamIdx < ParamLists.size()) {
if (ParamLists[ParamIdx]->size() == 0) {
@@ -1871,7 +2023,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
} else
SawNonEmptyTemplateParameterList = true;
}
-
+
if (NeedEmptyTemplateHeader) {
// If we're on the last of the types, and we need a 'template<>' header
// here, then it's an explicit specialization.
@@ -1881,7 +2033,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
if (ParamIdx < ParamLists.size()) {
if (ParamLists[ParamIdx]->size() > 0) {
// The header has template parameters when it shouldn't. Complain.
- Diag(ParamLists[ParamIdx]->getTemplateLoc(),
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
diag::err_template_param_list_matches_nontemplate)
<< T
<< SourceRange(ParamLists[ParamIdx]->getLAngleLoc(),
@@ -1913,7 +2065,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
if (ParamIdx < ParamLists.size() &&
DependsOnTemplateParameters(T, ParamLists[ParamIdx]))
ExpectedTemplateParams = nullptr;
- else
+ else
continue;
}
@@ -1929,11 +2081,11 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
CheckTemplateParameterList(ParamLists[ParamIdx], nullptr,
TPC_ClassTemplateMember))
Invalid = true;
-
+
++ParamIdx;
continue;
}
-
+
Diag(DeclLoc, diag::err_template_spec_needs_template_parameters)
<< T
<< getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
@@ -1956,7 +2108,7 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// Fabricate an empty template parameter list for the invented header.
return TemplateParameterList::Create(Context, SourceLocation(),
SourceLocation(), None,
- SourceLocation());
+ SourceLocation(), nullptr);
}
return nullptr;
@@ -1983,10 +2135,10 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
// not required, and there were any 'template<>' headers, note where the
// specialization occurred.
if (ExplicitSpecLoc.isValid() && HasAnyExplicitSpecHeader)
- Diag(ExplicitSpecLoc,
+ Diag(ExplicitSpecLoc,
diag::note_explicit_template_spec_does_not_need_header)
<< NestedTypes.back();
-
+
// We have a template parameter list with no corresponding scope, which
// means that the resulting template declaration can't be instantiated
// properly (we'll end up with dependent nodes when we shouldn't).
@@ -1995,11 +2147,11 @@ TemplateParameterList *Sema::MatchTemplateParametersToScopeSpecifier(
}
// C++ [temp.expl.spec]p16:
- // In an explicit specialization declaration for a member of a class
- // template or a member template that ap- pears in namespace scope, the
- // member template and some of its enclosing class templates may remain
- // unspecialized, except that the declaration shall not explicitly
- // specialize a class member template if its en- closing class templates
+ // In an explicit specialization declaration for a member of a class
+ // template or a member template that appears in namespace scope, the
+ // member template and some of its enclosing class templates may remain
+ // unspecialized, except that the declaration shall not explicitly
+ // specialize a class member template if its enclosing class templates
// are not explicitly specialized as well.
if (ParamLists.back()->size() == 0 &&
CheckExplicitSpecialization(ParamLists[ParamIdx]->getSourceRange(),
@@ -2024,14 +2176,14 @@ void Sema::NoteAllFoundTemplates(TemplateName Name) {
<< Template->getDeclName();
return;
}
-
+
if (OverloadedTemplateStorage *OST = Name.getAsOverloadedTemplate()) {
- for (OverloadedTemplateStorage::iterator I = OST->begin(),
+ for (OverloadedTemplateStorage::iterator I = OST->begin(),
IEnd = OST->end();
I != IEnd; ++I)
Diag((*I)->getLocation(), diag::note_template_declared_here)
<< 0 << (*I)->getDeclName();
-
+
return;
}
}
@@ -2074,11 +2226,8 @@ checkBuiltinTemplateIdType(Sema &SemaRef, BuiltinTemplateDecl *BTD,
for (llvm::APSInt I(NumArgs.getBitWidth(), NumArgs.isUnsigned());
I < NumArgs; ++I) {
TemplateArgument TA(Context, I, ArgTy);
- Expr *E = SemaRef.BuildExpressionFromIntegralTemplateArgument(
- TA, TemplateArgs[2].getLocation())
- .getAs<Expr>();
- SyntheticTemplateArgs.addArgument(
- TemplateArgumentLoc(TemplateArgument(E), E));
+ SyntheticTemplateArgs.addArgument(SemaRef.getTrivialTemplateArgumentLoc(
+ TA, ArgTy, TemplateArgs[2].getLocation()));
}
// The first template argument will be reused as the template decl that
// our synthetic template arguments will be applied to.
@@ -2310,7 +2459,7 @@ Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
}
-
+
QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
if (Result.isNull())
@@ -2337,7 +2486,7 @@ Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
ElabTL.setElaboratedKeywordLoc(SourceLocation());
ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
}
-
+
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
@@ -2352,11 +2501,11 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) {
TemplateName Template = TemplateD.get();
-
+
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
-
+
// Determine the tag kind
TagTypeKind TagKind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
ElaboratedTypeKeyword Keyword
@@ -2364,11 +2513,11 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
QualType T = Context.getDependentTemplateSpecializationType(Keyword,
- DTN->getQualifier(),
- DTN->getIdentifier(),
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
TemplateArgs);
-
- // Build type-source information.
+
+ // Build type-source information.
TypeLocBuilder TLB;
DependentTemplateSpecializationTypeLoc SpecTL
= TLB.push<DependentTemplateSpecializationTypeLoc>(T);
@@ -2389,21 +2538,22 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
// If the identifier resolves to a typedef-name or the simple-template-id
// resolves to an alias template specialization, the
// elaborated-type-specifier is ill-formed.
- Diag(TemplateLoc, diag::err_tag_reference_non_tag) << 4;
+ Diag(TemplateLoc, diag::err_tag_reference_non_tag)
+ << TAT << NTK_TypeAliasTemplate << TagKind;
Diag(TAT->getLocation(), diag::note_declared_at);
}
-
+
QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
if (Result.isNull())
return TypeResult(true);
-
+
// Check the tag kind
if (const RecordType *RT = Result->getAs<RecordType>()) {
RecordDecl *D = RT->getDecl();
-
+
IdentifierInfo *Id = D->getIdentifier();
assert(Id && "templated class must have an identifier");
-
+
if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition,
TagLoc, Id)) {
Diag(TagLoc, diag::err_use_with_wrong_tag)
@@ -2433,10 +2583,6 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
}
-static bool CheckTemplatePartialSpecializationArgs(
- Sema &S, SourceLocation NameLoc, TemplateParameterList *TemplateParams,
- unsigned ExplicitArgs, SmallVectorImpl<TemplateArgument> &TemplateArgs);
-
static bool CheckTemplateSpecializationScope(Sema &S, NamedDecl *Specialized,
NamedDecl *PrevDecl,
SourceLocation Loc,
@@ -2518,6 +2664,89 @@ makeTemplateArgumentListInfo(Sema &S, TemplateIdAnnotation &TemplateId) {
return TemplateArgs;
}
+template<typename PartialSpecDecl>
+static void checkMoreSpecializedThanPrimary(Sema &S, PartialSpecDecl *Partial) {
+ if (Partial->getDeclContext()->isDependentContext())
+ return;
+
+ // FIXME: Get the TDK from deduction in order to provide better diagnostics
+ // for non-substitution-failure issues?
+ TemplateDeductionInfo Info(Partial->getLocation());
+ if (S.isMoreSpecializedThanPrimary(Partial, Info))
+ return;
+
+ auto *Template = Partial->getSpecializedTemplate();
+ S.Diag(Partial->getLocation(),
+ diag::ext_partial_spec_not_more_specialized_than_primary)
+ << isa<VarTemplateDecl>(Template);
+
+ if (Info.hasSFINAEDiagnostic()) {
+ PartialDiagnosticAt Diag = {SourceLocation(),
+ PartialDiagnostic::NullDiagnostic()};
+ Info.takeSFINAEDiagnostic(Diag);
+ SmallString<128> SFINAEArgString;
+ Diag.second.EmitToString(S.getDiagnostics(), SFINAEArgString);
+ S.Diag(Diag.first,
+ diag::note_partial_spec_not_more_specialized_than_primary)
+ << SFINAEArgString;
+ }
+
+ S.Diag(Template->getLocation(), diag::note_template_decl_here);
+}
+
+template<typename PartialSpecDecl>
+static void checkTemplatePartialSpecialization(Sema &S,
+ PartialSpecDecl *Partial) {
+ // C++1z [temp.class.spec]p8: (DR1495)
+ // - The specialization shall be more specialized than the primary
+ // template (14.5.5.2).
+ checkMoreSpecializedThanPrimary(S, Partial);
+
+ // C++ [temp.class.spec]p8: (DR1315)
+ // - Each template-parameter shall appear at least once in the
+ // template-id outside a non-deduced context.
+ // C++1z [temp.class.spec.match]p3 (P0127R2)
+ // If the template arguments of a partial specialization cannot be
+ // deduced because of the structure of its template-parameter-list
+ // and the template-id, the program is ill-formed.
+ auto *TemplateParams = Partial->getTemplateParameters();
+ llvm::SmallBitVector DeducibleParams(TemplateParams->size());
+ S.MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
+ TemplateParams->getDepth(), DeducibleParams);
+
+ if (!DeducibleParams.all()) {
+ unsigned NumNonDeducible = DeducibleParams.size() - DeducibleParams.count();
+ S.Diag(Partial->getLocation(), diag::ext_partial_specs_not_deducible)
+ << isa<VarTemplatePartialSpecializationDecl>(Partial)
+ << (NumNonDeducible > 1)
+ << SourceRange(Partial->getLocation(),
+ Partial->getTemplateArgsAsWritten()->RAngleLoc);
+ for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
+ if (!DeducibleParams[I]) {
+ NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
+ if (Param->getDeclName())
+ S.Diag(Param->getLocation(),
+ diag::note_partial_spec_unused_parameter)
+ << Param->getDeclName();
+ else
+ S.Diag(Param->getLocation(),
+ diag::note_partial_spec_unused_parameter)
+ << "(anonymous)";
+ }
+ }
+ }
+}
+
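A hypothetical example for the deducibility check above: N appears only inside an arithmetic expression (a non-deduced context), so no argument list can ever select the partial specialization.

    template<typename T, int N> struct A {};
    template<int N> struct A<int, 2*N> {}; // diagnosed: N is not deducible
                                           // from the template-id, so this
                                           // partial specialization can
                                           // never be used
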
+void Sema::CheckTemplatePartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *Partial) {
+ checkTemplatePartialSpecialization(*this, Partial);
+}
+
+void Sema::CheckTemplatePartialSpecialization(
+ VarTemplatePartialSpecializationDecl *Partial) {
+ checkTemplatePartialSpecialization(*this, Partial);
+}
+
DeclResult Sema::ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc,
TemplateParameterList *TemplateParams, StorageClass SC,
@@ -2567,11 +2796,12 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
// Find the variable template (partial) specialization declaration that
// corresponds to these arguments.
if (IsPartialSpecialization) {
- if (CheckTemplatePartialSpecializationArgs(
- *this, TemplateNameLoc, VarTemplate->getTemplateParameters(),
- TemplateArgs.size(), Converted))
+ if (CheckTemplatePartialSpecializationArgs(TemplateNameLoc, VarTemplate,
+ TemplateArgs.size(), Converted))
return true;
+ // FIXME: Move these checks to CheckTemplatePartialSpecializationArgs so we
+ // also do them during instantiation.
bool InstantiationDependent;
if (!Name.isDependent() &&
!TemplateSpecializationType::anyDependentTemplateArguments(
@@ -2643,32 +2873,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
if (PrevPartial && PrevPartial->getInstantiatedFromMember())
PrevPartial->setMemberSpecialization();
- // Check that all of the template parameters of the variable template
- // partial specialization are deducible from the template
- // arguments. If not, this variable template partial specialization
- // will never be used.
- llvm::SmallBitVector DeducibleParams(TemplateParams->size());
- MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
- TemplateParams->getDepth(), DeducibleParams);
-
- if (!DeducibleParams.all()) {
- unsigned NumNonDeducible =
- DeducibleParams.size() - DeducibleParams.count();
- Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible)
- << /*variable template*/ 1 << (NumNonDeducible > 1)
- << SourceRange(TemplateNameLoc, RAngleLoc);
- for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
- if (!DeducibleParams[I]) {
- NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
- if (Param->getDeclName())
- Diag(Param->getLocation(), diag::note_partial_spec_unused_parameter)
- << Param->getDeclName();
- else
- Diag(Param->getLocation(), diag::note_partial_spec_unused_parameter)
- << "(anonymous)";
- }
- }
- }
+ CheckTemplatePartialSpecialization(Partial);
} else {
// Create a new class template specialization declaration node for
// this explicit specialization or friend declaration.
@@ -2890,12 +3095,10 @@ Sema::CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc,
<< Decl;
// Print the matching partial specializations.
- for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
- PEnd = Matched.end();
- P != PEnd; ++P)
- Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
- << getTemplateArgumentBindingsText(
- P->Partial->getTemplateParameters(), *P->Args);
+ for (MatchResult P : Matched)
+ Diag(P.Partial->getLocation(), diag::note_partial_spec_match)
+ << getTemplateArgumentBindingsText(P.Partial->getTemplateParameters(),
+ *P.Args);
return true;
}
@@ -3206,7 +3409,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
// Add the converted template type argument.
ArgType = Context.getCanonicalType(ArgType);
-
+
// Objective-C ARC:
// If an explicitly-specified template argument type is a lifetime type
// with no lifetime qualifier, the __strong lifetime qualifier is inferred.
@@ -3217,7 +3420,7 @@ bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
Qs.setObjCLifetime(Qualifiers::OCL_Strong);
ArgType = Context.getQualifiedType(ArgType, Qs);
}
-
+
Converted.push_back(TemplateArgument(ArgType));
return false;
}
@@ -3347,7 +3550,7 @@ SubstDefaultTemplateArgument(Sema &SemaRef,
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
///
-/// \param QualifierLoc Will be set to the nested-name-specifier (with
+/// \param QualifierLoc Will be set to the nested-name-specifier (with
/// source-location information) that precedes the template name.
///
/// \returns the substituted template argument, or NULL if an error occurred.
@@ -3698,7 +3901,7 @@ bool Sema::CheckTemplateArgument(NamedDecl *Param,
return false;
}
-/// \brief Diagnose an arity mismatch in the
+/// \brief Diagnose an arity mismatch in the number of template arguments.
static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs) {
@@ -3708,7 +3911,7 @@ static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
SourceRange Range;
if (NumArgs > NumParams)
- Range = SourceRange(TemplateArgs[NumParams].getLocation(),
+ Range = SourceRange(TemplateArgs[NumParams].getLocation(),
TemplateArgs.getRAngleLoc());
S.Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
<< (NumArgs > NumParams)
@@ -4332,20 +4535,20 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
if (!S.getLangOpts().CPlusPlus11)
return NPV_NotNullPointer;
-
+
// Determine whether we have a constant expression.
ExprResult ArgRV = S.DefaultFunctionArrayConversion(Arg);
if (ArgRV.isInvalid())
return NPV_Error;
Arg = ArgRV.get();
-
+
Expr::EvalResult EvalResult;
SmallVector<PartialDiagnosticAt, 8> Notes;
EvalResult.Diag = &Notes;
if (!Arg->EvaluateAsRValue(EvalResult, S.Context) ||
EvalResult.HasSideEffects) {
SourceLocation DiagLoc = Arg->getExprLoc();
-
+
// If our only note is the usual "invalid subexpression" note, just point
// the caret at its location rather than producing an essentially
// redundant note.
@@ -4354,21 +4557,21 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
DiagLoc = Notes[0].first;
Notes.clear();
}
-
+
S.Diag(DiagLoc, diag::err_template_arg_not_address_constant)
<< Arg->getType() << Arg->getSourceRange();
for (unsigned I = 0, N = Notes.size(); I != N; ++I)
S.Diag(Notes[I].first, Notes[I].second);
-
+
S.Diag(Param->getLocation(), diag::note_template_param_here);
return NPV_Error;
}
-
+
// C++11 [temp.arg.nontype]p1:
// - an address constant expression of type std::nullptr_t
if (Arg->getType()->isNullPtrType())
return NPV_NullPointer;
-
+
// - a constant expression that evaluates to a null pointer value (4.10); or
// - a constant expression that evaluates to a null member pointer value
// (4.11); or
@@ -4381,7 +4584,7 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
S.IsQualificationConversion(Arg->getType(), ParamType, false,
ObjCLifetimeConversion))
return NPV_NullPointer;
-
+
// The types didn't match, but we know we got a null pointer; complain,
// then recover as if the types were correct.
S.Diag(Arg->getExprLoc(), diag::err_template_arg_wrongtype_null_constant)
@@ -4401,7 +4604,7 @@ isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
S.Diag(Param->getLocation(), diag::note_template_param_here);
return NPV_NullPointer;
}
-
+
// FIXME: If we ever want to support general, address-constant expressions
// as non-type template arguments, we should return the ExprResult here to
// be interpreted by the caller.
@@ -4902,12 +5105,33 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
CheckTemplateArgumentKind CTAK) {
SourceLocation StartLoc = Arg->getLocStart();
- // If either the parameter has a dependent type or the argument is
- // type-dependent, there's nothing we can check now.
- if (ParamType->isDependentType() || Arg->isTypeDependent()) {
- // FIXME: Produce a cloned, canonical expression?
- Converted = TemplateArgument(Arg);
- return Arg;
+ // If the parameter type somehow involves auto, deduce the type now.
+ if (getLangOpts().CPlusPlus1z && ParamType->isUndeducedType()) {
+ // When checking a deduced template argument, deduce from its type even if
+  // the type is dependent, in order to check that the types of non-type
+  // template arguments line up properly in partial ordering.
+ Optional<unsigned> Depth;
+ if (CTAK != CTAK_Specified)
+ Depth = Param->getDepth() + 1;
+ if (DeduceAutoType(
+ Context.getTrivialTypeSourceInfo(ParamType, Param->getLocation()),
+ Arg, ParamType, Depth) == DAR_Failed) {
+ Diag(Arg->getExprLoc(),
+ diag::err_non_type_template_parm_type_deduction_failure)
+ << Param->getDeclName() << Param->getType() << Arg->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
+ // CheckNonTypeTemplateParameterType will produce a diagnostic if there's
+ // an error. The error message normally references the parameter
+ // declaration, but here we'll pass the argument location because that's
+ // where the parameter type is deduced.
+ ParamType = CheckNonTypeTemplateParameterType(ParamType, Arg->getExprLoc());
+ if (ParamType.isNull()) {
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
}
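A minimal sketch of the C++1z behavior handled above (assuming -std=c++1z): the parameter's type is deduced from the argument before the argument itself is checked.

    template<auto N> struct Constant {}; // N has a deduced type
    Constant<42> a;   // N deduced as int
    Constant<'x'> b;  // N deduced as char
    Constant<3.14> c; // error: double is not a valid non-type
                      // template parameter type in C++1z
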
// We should have already dropped all cv-qualifiers by now.
@@ -4915,30 +5139,36 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
"non-type template parameter type cannot be qualified");
if (CTAK == CTAK_Deduced &&
- !Context.hasSameUnqualifiedType(ParamType, Arg->getType())) {
- // C++ [temp.deduct.type]p17:
- // If, in the declaration of a function template with a non-type
- // template-parameter, the non-type template-parameter is used
- // in an expression in the function parameter-list and, if the
- // corresponding template-argument is deduced, the
- // template-argument type shall match the type of the
- // template-parameter exactly, except that a template-argument
- // deduced from an array bound may be of any integral type.
+ !Context.hasSameType(ParamType.getNonLValueExprType(Context),
+ Arg->getType())) {
+ // C++ [temp.deduct.type]p17: (DR1770)
+ // If P has a form that contains <i>, and if the type of i differs from
+ // the type of the corresponding template parameter of the template named
+ // by the enclosing simple-template-id, deduction fails.
+ //
+ // Note that CTAK will be CTAK_DeducedFromArrayBound if the form was [i]
+ // rather than <i>.
+ //
+ // FIXME: We interpret the 'i' here as referring to the expression
+ // denoting the non-type template parameter rather than the parameter
+ // itself, and so strip off references before comparing types. It's
+ // not clear how this is supposed to work for references.
Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
- << Arg->getType().getUnqualifiedType()
+ << Arg->getType()
<< ParamType.getUnqualifiedType();
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
}
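A hedged example of the DR1770 check above, modeled on the wording of [temp.deduct.type]p17: the deduced argument's type must match the parameter's type exactly.

    template<int I> struct X {};
    template<short s> void f(X<s>);
    void g() { f(X<1>()); } // deduction fails: s would be deduced from an
                            // argument whose type is int, not short
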
- if (getLangOpts().CPlusPlus1z) {
- // FIXME: We can do some limited checking for a value-dependent but not
- // type-dependent argument.
- if (Arg->isValueDependent()) {
- Converted = TemplateArgument(Arg);
- return Arg;
- }
+ // If either the parameter has a dependent type or the argument is
+ // type-dependent, there's nothing we can check now.
+ if (ParamType->isDependentType() || Arg->isTypeDependent()) {
+ // FIXME: Produce a cloned, canonical expression?
+ Converted = TemplateArgument(Arg);
+ return Arg;
+ }
+ if (getLangOpts().CPlusPlus1z) {
// C++1z [temp.arg.nontype]p1:
// A template-argument for a non-type template parameter shall be
// a converted constant expression of the type of the template-parameter.
@@ -4948,6 +5178,13 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (ArgResult.isInvalid())
return ExprError();
+ // For a value-dependent argument, CheckConvertedConstantExpression is
+ // permitted (and expected) to be unable to determine a value.
+ if (ArgResult.get()->isValueDependent()) {
+ Converted = TemplateArgument(ArgResult.get());
+ return ArgResult;
+ }
+
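A small sketch of the value-dependent case just added: the argument is converted to the parameter's type now, but its value can only be checked at instantiation.

    template<int N> struct A {};
    template<int M> struct B {
      A<M + 1> a; // M + 1 is value-dependent: converted here, evaluated
                  // only once B is instantiated
    };
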
QualType CanonParamType = Context.getCanonicalType(ParamType);
// Convert the APValue to a TemplateArgument.
@@ -5052,14 +5289,6 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// conversions (4.7) are applied.
if (getLangOpts().CPlusPlus11) {
- // We can't check arbitrary value-dependent arguments.
- // FIXME: If there's no viable conversion to the template parameter type,
- // we should be able to diagnose that prior to instantiation.
- if (Arg->isValueDependent()) {
- Converted = TemplateArgument(Arg);
- return Arg;
- }
-
// C++ [temp.arg.nontype]p1:
// A template-argument for a non-type, non-template template-parameter
// shall be one of:
@@ -5074,6 +5303,12 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
if (ArgResult.isInvalid())
return ExprError();
+ // We can't check arbitrary value-dependent arguments.
+ if (ArgResult.get()->isValueDependent()) {
+ Converted = TemplateArgument(ArgResult.get());
+ return ArgResult;
+ }
+
// Widen the argument value to sizeof(parameter type). This is almost
// always a no-op, except when the parameter type is bool. In
// that case, this may extend the argument from 1 bit to 8 bits.
@@ -5112,7 +5347,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
} else if (!Arg->isValueDependent()) {
class TmplArgICEDiagnoser : public VerifyICEDiagnoser {
QualType T;
-
+
public:
TmplArgICEDiagnoser(QualType T) : T(T) { }
@@ -5174,14 +5409,14 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
} else {
llvm::APSInt OldValue = Value;
-
+
// Coerce the template argument's value to the value it will have
// based on the template parameter's type.
unsigned AllowedBits = Context.getTypeSize(IntegerType);
if (Value.getBitWidth() != AllowedBits)
Value = Value.extOrTrunc(AllowedBits);
Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
-
+
// Complain if an unsigned parameter received a negative value.
if (IntegerType->isUnsignedIntegerOrEnumerationType()
&& (OldValue.isSigned() && OldValue.isNegative())) {
@@ -5190,7 +5425,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
<< Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
}
-
+
// Complain if we overflowed the template parameter's type.
unsigned RequiredBits;
if (IntegerType->isUnsignedIntegerOrEnumerationType())
@@ -5209,7 +5444,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
}
Converted = TemplateArgument(Context, Value,
- ParamType->isEnumeralType()
+ ParamType->isEnumeralType()
? Context.getCanonicalType(ParamType)
: IntegerType);
return Arg;
@@ -5321,17 +5556,17 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Converted = TemplateArgument(Arg);
return Arg;
}
-
+
switch (isNullPointerValueTemplateArgument(*this, Param, ParamType, Arg)) {
case NPV_NotNullPointer:
Diag(Arg->getExprLoc(), diag::err_template_arg_not_convertible)
<< Arg->getType() << ParamType;
Diag(Param->getLocation(), diag::note_template_param_here);
return ExprError();
-
+
case NPV_Error:
return ExprError();
-
+
case NPV_NullPointer:
Diag(Arg->getExprLoc(), diag::warn_cxx98_compat_template_arg_null);
Converted = TemplateArgument(Context.getCanonicalType(ParamType),
@@ -5350,6 +5585,10 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
return Arg;
}
+static void DiagnoseTemplateParameterListArityMismatch(
+ Sema &S, TemplateParameterList *New, TemplateParameterList *Old,
+ Sema::TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc);
+
/// \brief Check a template argument against its corresponding
/// template template parameter.
///
@@ -5366,6 +5605,9 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
return false;
}
+ if (Template->isInvalidDecl())
+ return true;
+
// C++0x [temp.arg.template]p1:
// A template-argument for a template template-parameter shall be
// the name of a class template or an alias template, expressed as an
@@ -5393,6 +5635,25 @@ bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
if (Param->isExpandedParameterPack())
Params = Param->getExpansionTemplateParameters(ArgumentPackIndex);
+ // C++1z [temp.arg.template]p3: (DR 150)
+ // A template-argument matches a template template-parameter P when P
+ // is at least as specialized as the template-argument A.
+ if (getLangOpts().RelaxedTemplateTemplateArgs) {
+ // Quick check for the common case:
+ // If P contains a parameter pack, then A [...] matches P if each of A's
+ // template parameters matches the corresponding template parameter in
+ // the template-parameter-list of P.
+ if (TemplateParameterListsAreEqual(
+ Template->getTemplateParameters(), Params, false,
+ TPL_TemplateTemplateArgumentMatch, Arg.getLocation()))
+ return false;
+
+ if (isTemplateTemplateParameterAtLeastAsSpecializedAs(Params, Template,
+ Arg.getLocation()))
+ return false;
+ // FIXME: Produce better diagnostics for deduction failures.
+ }
+
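An illustrative example of the relaxed (P0522R0 / DR150) matching enabled above; under the strict rules the two parameter lists would have to match exactly.

    template<typename T, typename U = int> struct Pair;
    template<template<typename> class X> struct Apply {};
    Apply<Pair> a; // OK with relaxed matching: thanks to U's default
                   // argument, Pair is at least as specialized as X
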
return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
Params,
true,
@@ -5578,7 +5839,7 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
Context.getTrivialTypeSourceInfo(OrigT, Loc),
Loc, Loc);
}
-
+
return E;
}
@@ -5604,7 +5865,7 @@ static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
return false;
}
- // Check that both are parameter packs are neither are parameter packs.
+ // Check that both are parameter packs or neither are parameter packs.
// However, if we are matching a template template argument to a
// template template parameter, the template template parameter can have
// a parameter pack where the template template argument does not.
@@ -5816,12 +6077,14 @@ Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
// C++ [temp]p4:
// A template [...] shall not have C linkage.
DeclContext *Ctx = S->getEntity();
- if (Ctx && Ctx->isExternCContext())
- return Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
- << TemplateParams->getSourceRange();
-
- while (Ctx && isa<LinkageSpecDecl>(Ctx))
- Ctx = Ctx->getParent();
+ if (Ctx && Ctx->isExternCContext()) {
+ Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
+ << TemplateParams->getSourceRange();
+ if (const LinkageSpecDecl *LSD = Ctx->getExternCContext())
+ Diag(LSD->getExternLoc(), diag::note_extern_c_begins_here);
+ return true;
+ }
+ Ctx = Ctx->getRedeclContext();
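A sketch of the case the extra note above now points at:

    extern "C" {                       // note: extern "C" begins here
      template<typename T> void f(T);  // error: templates must have
    }                                  // C++ linkage
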
// C++ [temp]p2:
// A template-declaration can appear only as a namespace scope or
@@ -5957,7 +6220,7 @@ static bool CheckTemplateSpecializationScope(Sema &S,
<< Specialized;
return true;
}
-
+
// C++ [temp.class.spec]p6:
// A class template partial specialization may be declared or redeclared
// in any namespace scope in which its definition may be defined (14.5.1
@@ -6035,12 +6298,12 @@ static bool CheckTemplateSpecializationScope(Sema &S,
return false;
}
-static SourceRange findTemplateParameter(unsigned Depth, Expr *E) {
- if (!E->isInstantiationDependent())
+static SourceRange findTemplateParameterInType(unsigned Depth, Expr *E) {
+ if (!E->isTypeDependent())
return SourceLocation();
- DependencyChecker Checker(Depth);
+ DependencyChecker Checker(Depth, /*IgnoreNonTypeDependent*/true);
Checker.TraverseStmt(E);
- if (Checker.Match && Checker.MatchLoc.isInvalid())
+ if (Checker.MatchLoc.isInvalid())
return E->getSourceRange();
return Checker.MatchLoc;
}
@@ -6048,9 +6311,9 @@ static SourceRange findTemplateParameter(unsigned Depth, Expr *E) {
static SourceRange findTemplateParameter(unsigned Depth, TypeLoc TL) {
if (!TL.getType()->isDependentType())
return SourceLocation();
- DependencyChecker Checker(Depth);
+ DependencyChecker Checker(Depth, /*IgnoreNonTypeDependent*/true);
Checker.TraverseTypeLoc(TL);
- if (Checker.Match && Checker.MatchLoc.isInvalid())
+ if (Checker.MatchLoc.isInvalid())
return TL.getSourceRange();
return Checker.MatchLoc;
}
@@ -6102,8 +6365,16 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs(
// shall not involve a template parameter of the partial
// specialization except when the argument expression is a
// simple identifier.
+ // -- The type of a template parameter corresponding to a
+ // specialized non-type argument shall not be dependent on a
+ // parameter of the specialization.
+ // DR1315 removes the first bullet, leaving an incoherent set of rules.
+ // We implement a compromise between the original rules and DR1315:
+ // -- A specialized non-type template argument shall not be
+ // type-dependent and the corresponding template parameter
+ // shall have a non-dependent type.
SourceRange ParamUseRange =
- findTemplateParameter(Param->getDepth(), ArgExpr);
+ findTemplateParameterInType(Param->getDepth(), ArgExpr);
if (ParamUseRange.isValid()) {
if (IsDefaultArgument) {
S.Diag(TemplateNameLoc,
@@ -6119,26 +6390,15 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs(
return true;
}
- // -- The type of a template parameter corresponding to a
- // specialized non-type argument shall not be dependent on a
- // parameter of the specialization.
- //
- // FIXME: We need to delay this check until instantiation in some cases:
- //
- // template<template<typename> class X> struct A {
- // template<typename T, X<T> N> struct B;
- // template<typename T> struct B<T, 0>;
- // };
- // template<typename> using X = int;
- // A<X>::B<int, 0> b;
ParamUseRange = findTemplateParameter(
- Param->getDepth(), Param->getTypeSourceInfo()->getTypeLoc());
+ Param->getDepth(), Param->getTypeSourceInfo()->getTypeLoc());
if (ParamUseRange.isValid()) {
S.Diag(IsDefaultArgument ? TemplateNameLoc : ArgExpr->getLocStart(),
diag::err_dependent_typed_non_type_arg_in_partial_spec)
- << Param->getType() << ParamUseRange;
+ << Param->getType();
S.Diag(Param->getLocation(), diag::note_template_param_here)
- << (IsDefaultArgument ? ParamUseRange : SourceRange());
+ << (IsDefaultArgument ? ParamUseRange : SourceRange())
+ << ParamUseRange;
return true;
}
}
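A hedged example of the compromise rule described above: the specialized argument itself is a constant, but the type of the corresponding parameter depends on another parameter of the specialization.

    template<typename T, T V> struct A;
    template<typename T> struct A<T, 1>; // diagnosed: the type of V
                                         // (namely T) is dependent
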
@@ -6150,27 +6410,32 @@ static bool CheckNonTypeTemplatePartialSpecializationArgs(
/// partial specialization according to C++ [temp.class.spec]p9.
///
/// \param TemplateNameLoc the location of the template name.
-/// \param TemplateParams the template parameters of the primary class
+/// \param PrimaryTemplate the primary class
/// template.
/// \param NumExplicit the number of explicitly-specified template arguments.
/// \param TemplateArgs the template arguments of the class template
/// partial specialization.
///
/// \returns \c true if there was an error, \c false otherwise.
-static bool CheckTemplatePartialSpecializationArgs(
- Sema &S, SourceLocation TemplateNameLoc,
- TemplateParameterList *TemplateParams, unsigned NumExplicit,
- SmallVectorImpl<TemplateArgument> &TemplateArgs) {
- const TemplateArgument *ArgList = TemplateArgs.data();
+bool Sema::CheckTemplatePartialSpecializationArgs(
+ SourceLocation TemplateNameLoc, TemplateDecl *PrimaryTemplate,
+ unsigned NumExplicit, ArrayRef<TemplateArgument> TemplateArgs) {
+ // We have to be conservative when checking a template in a dependent
+ // context.
+ if (PrimaryTemplate->getDeclContext()->isDependentContext())
+ return false;
+ TemplateParameterList *TemplateParams =
+ PrimaryTemplate->getTemplateParameters();
for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
NonTypeTemplateParmDecl *Param
= dyn_cast<NonTypeTemplateParmDecl>(TemplateParams->getParam(I));
if (!Param)
continue;
- if (CheckNonTypeTemplatePartialSpecializationArgs(
- S, TemplateNameLoc, Param, &ArgList[I], 1, I >= NumExplicit))
+ if (CheckNonTypeTemplatePartialSpecializationArgs(*this, TemplateNameLoc,
+ Param, &TemplateArgs[I],
+ 1, I >= NumExplicit))
return true;
}
@@ -6314,11 +6579,12 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
// Find the class template (partial) specialization declaration that
// corresponds to these arguments.
if (isPartialSpecialization) {
- if (CheckTemplatePartialSpecializationArgs(
- *this, TemplateNameLoc, ClassTemplate->getTemplateParameters(),
- TemplateArgs.size(), Converted))
+ if (CheckTemplatePartialSpecializationArgs(TemplateNameLoc, ClassTemplate,
+ TemplateArgs.size(), Converted))
return true;
+ // FIXME: Move this to CheckTemplatePartialSpecializationArgs so we
+ // also do it during instantiation.
bool InstantiationDependent;
if (!Name.isDependent() &&
!TemplateSpecializationType::anyDependentTemplateArguments(
@@ -6363,6 +6629,9 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
//
// -- The argument list of the specialization shall not be identical
// to the implicit argument list of the primary template.
+ //
+ // This rule has since been removed, because it's redundant given DR1495,
+ // but we keep it because it produces better diagnostics and recovery.
Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
<< /*class template*/0 << (TUK == TUK_Definition)
<< FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
@@ -6405,34 +6674,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
if (PrevPartial && PrevPartial->getInstantiatedFromMember())
PrevPartial->setMemberSpecialization();
- // Check that all of the template parameters of the class template
- // partial specialization are deducible from the template
- // arguments. If not, this class template partial specialization
- // will never be used.
- llvm::SmallBitVector DeducibleParams(TemplateParams->size());
- MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
- TemplateParams->getDepth(),
- DeducibleParams);
-
- if (!DeducibleParams.all()) {
- unsigned NumNonDeducible = DeducibleParams.size()-DeducibleParams.count();
- Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible)
- << /*class template*/0 << (NumNonDeducible > 1)
- << SourceRange(TemplateNameLoc, RAngleLoc);
- for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
- if (!DeducibleParams[I]) {
- NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
- if (Param->getDeclName())
- Diag(Param->getLocation(),
- diag::note_partial_spec_unused_parameter)
- << Param->getDeclName();
- else
- Diag(Param->getLocation(),
- diag::note_partial_spec_unused_parameter)
- << "(anonymous)";
- }
- }
- }
+ CheckTemplatePartialSpecialization(Partial);
} else {
// Create a new class template specialization declaration node for
// this explicit specialization or friend declaration.
@@ -6509,8 +6751,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
TUK = TUK_Declaration;
} else if (Def) {
SourceRange Range(TemplateNameLoc, RAngleLoc);
- Diag(TemplateNameLoc, diag::err_redefinition)
- << Context.getTypeDeclType(Specialization) << Range;
+ Diag(TemplateNameLoc, diag::err_redefinition) << Specialization << Range;
Diag(Def->getLocation(), diag::note_previous_definition);
Specialization->setInvalidDecl();
return true;
@@ -6531,7 +6772,7 @@ Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
Diag(Specialization->getLocation(), diag::err_module_private_specialization)
<< (isPartialSpecialization? 1 : 0)
<< FixItHint::CreateRemoval(ModulePrivateLoc);
-
+
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
// itself. This means that we'll pretty-print the type retrieved
@@ -6748,13 +6989,7 @@ Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
// instantiation of a template appears after a declaration of
// an explicit specialization for that template, the explicit
// instantiation has no effect.
- //
- // In C++98/03 mode, we only give an extension warning here, because it
- // is not harmful to try to explicitly instantiate something that
- // has been explicitly specialized.
- Diag(NewLoc, getLangOpts().CPlusPlus11 ?
- diag::warn_cxx98_compat_explicit_instantiation_after_specialization :
- diag::ext_explicit_instantiation_after_specialization)
+ Diag(NewLoc, diag::warn_explicit_instantiation_after_specialization)
<< PrevDecl;
Diag(PrevDecl->getLocation(),
diag::note_previous_template_specialization);
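A sketch of the situation this now-unconditional warning covers:

    template<typename T> void f(T) {}
    template<> void f(int) {}  // explicit specialization
    template void f<int>(int); // a later explicit instantiation has no
                               // effect; warned in all language modes
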
@@ -6926,6 +7161,21 @@ bool Sema::CheckFunctionTemplateSpecialization(
continue;
}
+ // Target attributes are part of the cuda function signature, so
+ // the deduced template's cuda target must match that of the
+ // specialization. Given that C++ template deduction does not
+ // take target attributes into account, we reject candidates
+ // here that have a different target.
+ if (LangOpts.CUDA &&
+ IdentifyCUDATarget(Specialization,
+ /* IgnoreImplicitHDAttributes = */ true) !=
+ IdentifyCUDATarget(FD, /* IgnoreImplicitHDAttributes = */ true)) {
+ FailedCandidates.addCandidate().set(
+ I.getPair(), FunTmpl->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
+ continue;
+ }
+
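A hypothetical CUDA example for the rejection above (assuming the usual __host__/__device__ attributes):

    template<typename T> __host__ void f(T) {}
    template<> __device__ void f(int) {} // rejected as a candidate: the
                                         // specialization's device target
                                         // differs from the template's
                                         // host target
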
// Record this candidate.
if (ExplicitTemplateArgs)
ConvertedTemplateArgs[Specialization] = std::move(Args);
@@ -7002,7 +7252,7 @@ bool Sema::CheckFunctionTemplateSpecialization(
SpecInfo->getPointOfInstantiation(),
HasNoEffect))
return true;
-
+
// Mark the prior declaration as an explicit specialization, so that later
// clients know that this is an explicit specialization.
if (!isFriend) {
@@ -7036,6 +7286,14 @@ bool Sema::CheckFunctionTemplateSpecialization(
SpecInfo->getTemplateSpecializationKind(),
ExplicitTemplateArgs ? &ConvertedTemplateArgs[Specialization] : nullptr);
+ // A function template specialization inherits the target attributes
+  // of its template. (We require any explicitly written attributes to
+  // match, but a template may have implicit attributes by virtue of,
+  // e.g., being constexpr, and it passes these implicit
+ // attributes on to its specializations.)
+ if (LangOpts.CUDA)
+ inheritCUDATargetAttrs(FD, *Specialization->getPrimaryTemplate());
+
// The "previous declaration" for this function template specialization is
// the prior function template specialization.
Previous.clear();
@@ -7190,7 +7448,7 @@ Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
if (InstantiationFunction->isDeleted()) {
assert(InstantiationFunction->getCanonicalDecl() ==
InstantiationFunction);
- InstantiationFunction->setDeletedAsWritten(false);
+ InstantiationFunction->setDeletedAsWritten(false);
}
}
@@ -7318,6 +7576,30 @@ static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
return false;
}
+/// Make a dllexport or dllimport attr on a class template specialization take
+/// effect.
+static void dllExportImportClassTemplateSpecialization(
+ Sema &S, ClassTemplateSpecializationDecl *Def) {
+ auto *A = cast_or_null<InheritableAttr>(getDLLAttr(Def));
+ assert(A && "dllExportImportClassTemplateSpecialization called "
+ "on Def without dllexport or dllimport");
+
+ // We reject explicit instantiations in class scope, so there should
+ // never be any delayed exported classes to worry about.
+ assert(S.DelayedDllExportClasses.empty() &&
+ "delayed exports present at explicit instantiation");
+ S.checkClassLevelDLLAttribute(Def);
+
+ // Propagate attribute to base class templates.
+ for (auto &B : Def->bases()) {
+ if (auto *BT = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
+ B.getType()->getAsCXXRecordDecl()))
+ S.propagateDLLAttrToBaseClassTemplate(Def, A, BT, B.getLocStart());
+ }
+
+ S.referenceDLLExportedClassMethods();
+}
+
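The helper above centralizes handling for cases like this sketch (MS ABI or Windows-Itanium environments):

    template<typename T> struct S { void f() {} };
    template struct __declspec(dllexport) S<int>; // explicit instantiation
        // definition: S<int> and its members must actually be exported
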
// Explicit instantiation of a class template specialization
DeclResult
Sema::ActOnExplicitInstantiation(Scope *S,
@@ -7344,14 +7626,8 @@ Sema::ActOnExplicitInstantiation(Scope *S,
ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(TD);
if (!ClassTemplate) {
- unsigned ErrorKind = 0;
- if (isa<TypeAliasTemplateDecl>(TD)) {
- ErrorKind = 4;
- } else if (isa<TemplateTemplateParmDecl>(TD)) {
- ErrorKind = 5;
- }
-
- Diag(TemplateNameLoc, diag::err_tag_reference_non_tag) << ErrorKind;
+ NonTagKind NTK = getNonTagTypeDeclKind(TD, Kind);
+ Diag(TemplateNameLoc, diag::err_tag_reference_non_tag) << TD << NTK << Kind;
Diag(TD->getLocation(), diag::note_previous_use);
return true;
}
@@ -7561,7 +7837,8 @@ Sema::ActOnExplicitInstantiation(Scope *S,
Def->setTemplateSpecializationKind(TSK);
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
- Context.getTargetInfo().getCXXABI().isMicrosoft()) {
+ (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())) {
// In the MS ABI, an explicit instantiation definition can add a dll
// attribute to a template with a previous instantiation declaration.
// MinGW doesn't allow this.
@@ -7569,23 +7846,33 @@ Sema::ActOnExplicitInstantiation(Scope *S,
getDLLAttr(Specialization)->clone(getASTContext()));
A->setInherited(true);
Def->addAttr(A);
-
- // We reject explicit instantiations in class scope, so there should
- // never be any delayed exported classes to worry about.
- assert(DelayedDllExportClasses.empty() &&
- "delayed exports present at explicit instantiation");
- checkClassLevelDLLAttribute(Def);
- referenceDLLExportedClassMethods();
-
- // Propagate attribute to base class templates.
- for (auto &B : Def->bases()) {
- if (auto *BT = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
- B.getType()->getAsCXXRecordDecl()))
- propagateDLLAttrToBaseClassTemplate(Def, A, BT, B.getLocStart());
- }
+ dllExportImportClassTemplateSpecialization(*this, Def);
}
}
+ // Fix a TSK_ImplicitInstantiation followed by a
+ // TSK_ExplicitInstantiationDefinition
+ if (Old_TSK == TSK_ImplicitInstantiation &&
+ Specialization->hasAttr<DLLExportAttr>() &&
+ (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isWindowsItaniumEnvironment())) {
+ // In the MS ABI, an explicit instantiation definition can add a dll
+ // attribute to a template with a previous implicit instantiation.
+ // MinGW doesn't allow this. We limit clang to only adding dllexport, to
+ // avoid potentially strange codegen behavior. For example, if we extend
+ // this conditional to dllimport, and we have a source file calling a
+ // method on an implicitly instantiated template class instance and then
+ // declaring a dllimport explicit instantiation definition for the same
+ // template class, the codegen for the method call will not respect the
+ // dllimport, while it will with cl. The Def will already have the DLL
+ // attribute, since the Def and Specialization will be the same in the
+ // case of Old_TSK == TSK_ImplicitInstantiation, and we already added the
+ // attribute to the Specialization; we just need to make it take effect.
+ assert(Def == Specialization &&
+ "Def and Specialization should match for implicit instantiation");
+ dllExportImportClassTemplateSpecialization(*this, Def);
+ }
+
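A sketch of the TSK_ImplicitInstantiation case fixed above:

    template<typename T> struct S { void f() {} };
    S<int> s; // implicit instantiation comes first
    template struct __declspec(dllexport) S<int>; // the dllexport added by
        // the explicit instantiation definition must still take effect
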
// Set the template specialization kind. Make sure it is set before
// instantiating the members which will trigger ASTConsumer callbacks.
Specialization->setTemplateSpecializationKind(TSK);
@@ -7754,18 +8041,18 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
return true;
// C++ [dcl.stc]p1:
- // A storage-class-specifier shall not be specified in [...] an explicit
+ // A storage-class-specifier shall not be specified in [...] an explicit
// instantiation (14.7.2) directive.
if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_of_typedef)
<< Name;
return true;
- } else if (D.getDeclSpec().getStorageClassSpec()
+ } else if (D.getDeclSpec().getStorageClassSpec()
!= DeclSpec::SCS_unspecified) {
    // Complain about, then remove, the storage class specifier.
Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_storage_class)
<< FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
-
+
D.getMutableDeclSpec().ClearStorageClassSpecs();
}
@@ -7957,13 +8244,15 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
// instantiated from the member definition associated with its class
// template.
UnresolvedSet<8> Matches;
+ AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
TemplateSpecCandidateSet FailedCandidates(D.getIdentifierLoc());
for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
P != PEnd; ++P) {
NamedDecl *Prev = *P;
if (!HasExplicitTemplateArgs) {
if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Prev)) {
- QualType Adjusted = adjustCCAndNoReturn(R, Method->getType());
+ QualType Adjusted = adjustCCAndNoReturn(R, Method->getType(),
+ /*AdjustExceptionSpec*/true);
if (Context.hasSameUnqualifiedType(Method->getType(), Adjusted)) {
Matches.clear();
@@ -7993,6 +8282,21 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
continue;
}
+ // Target attributes are part of the cuda function signature, so
+ // the cuda target of the instantiated function must match that of its
+ // template. Given that C++ template deduction does not take
+ // target attributes into account, we reject candidates here that
+ // have a different target.
+ if (LangOpts.CUDA &&
+ IdentifyCUDATarget(Specialization,
+ /* IgnoreImplicitHDAttributes = */ true) !=
+ IdentifyCUDATarget(Attr)) {
+ FailedCandidates.addCandidate().set(
+ P.getPair(), FunTmpl->getTemplatedDecl(),
+ MakeDeductionFailureInfo(Context, TDK_CUDATargetMismatch, Info));
+ continue;
+ }
+
Matches.addDecl(Specialization, P.getAccess());
}
@@ -8063,7 +8367,6 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
}
Specialization->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
- AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
if (Attr)
ProcessDeclAttributeList(S, Specialization, Attr);
@@ -8131,7 +8434,7 @@ Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// Create the resulting type.
ElaboratedTypeKeyword Kwd = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
QualType Result = Context.getDependentNameType(Kwd, NNS, Name);
-
+
// Create type-source location information for this type.
TypeLocBuilder TLB;
DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(Result);
@@ -8147,7 +8450,7 @@ Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
SourceLocation IdLoc) {
if (SS.isInvalid())
return true;
-
+
if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
Diag(TypenameLoc,
getLangOpts().CPlusPlus11 ?
@@ -8193,11 +8496,11 @@ Sema::ActOnTypenameType(Scope *S,
diag::warn_cxx98_compat_typename_outside_of_template :
diag::ext_typename_outside_of_template)
<< FixItHint::CreateRemoval(TypenameLoc);
-
+
// Translate the parser's template argument list in our AST format.
TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
translateTemplateArguments(TemplateArgsIn, TemplateArgs);
-
+
TemplateName Template = TemplateIn.get();
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
// Construct a dependent template specialization type.
@@ -8207,10 +8510,10 @@ Sema::ActOnTypenameType(Scope *S,
DTN->getQualifier(),
DTN->getIdentifier(),
TemplateArgs);
-
+
// Create source-location information for this type.
TypeLocBuilder Builder;
- DependentTemplateSpecializationTypeLoc SpecTL
+ DependentTemplateSpecializationTypeLoc SpecTL
= Builder.push<DependentTemplateSpecializationTypeLoc>(T);
SpecTL.setElaboratedKeywordLoc(TypenameLoc);
SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
@@ -8222,11 +8525,11 @@ Sema::ActOnTypenameType(Scope *S,
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
}
-
+
QualType T = CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
if (T.isNull())
return true;
-
+
// Provide source-location information for the template specialization type.
TypeLocBuilder Builder;
TemplateSpecializationTypeLoc SpecTL
@@ -8237,12 +8540,12 @@ Sema::ActOnTypenameType(Scope *S,
SpecTL.setRAngleLoc(RAngleLoc);
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
-
+
T = Context.getElaboratedType(ETK_Typename, SS.getScopeRep(), T);
ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
TL.setElaboratedKeywordLoc(TypenameLoc);
TL.setQualifierLoc(SS.getWithLocInContext(Context));
-
+
TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
return CreateParsedType(T, TSI);
}
@@ -8287,9 +8590,9 @@ static bool isEnableIf(NestedNameSpecifierLoc NNS, const IdentifierInfo &II,
/// \brief Build the type that describes a C++ typename specifier,
/// e.g., "typename T::type".
QualType
-Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
+Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
- NestedNameSpecifierLoc QualifierLoc,
+ NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc) {
CXXScopeSpec SS;
@@ -8300,8 +8603,8 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
// If the nested-name-specifier is dependent and couldn't be
// resolved to a type, build a typename type.
assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
- return Context.getDependentNameType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
+ return Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
&II);
}
@@ -8353,8 +8656,8 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
case LookupResult::NotFoundInCurrentInstantiation:
// Okay, it's a member of an unknown instantiation.
- return Context.getDependentNameType(Keyword,
- QualifierLoc.getNestedNameSpecifier(),
+ return Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
&II);
case LookupResult::Found:
@@ -8362,7 +8665,7 @@ Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
// We found a type. Build an ElaboratedType, since the
// typename-specifier was just sugar.
MarkAnyDeclReferenced(Type->getLocation(), Type, /*OdrUse=*/false);
- return Context.getElaboratedType(ETK_Typename,
+ return Context.getElaboratedType(ETK_Typename,
QualifierLoc.getNestedNameSpecifier(),
Context.getTypeDeclType(Type));
}
@@ -8429,7 +8732,7 @@ namespace {
this->Loc = Loc;
this->Entity = Entity;
}
-
+
ExprResult TransformLambdaExpr(LambdaExpr *E) {
// Lambdas never need to be transformed.
return E;
@@ -8480,15 +8783,15 @@ ExprResult Sema::RebuildExprInCurrentInstantiation(Expr *E) {
}
bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
- if (SS.isInvalid())
+ if (SS.isInvalid())
return true;
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
CurrentInstantiationRebuilder Rebuilder(*this, SS.getRange().getBegin(),
DeclarationName());
- NestedNameSpecifierLoc Rebuilt
+ NestedNameSpecifierLoc Rebuilt
= Rebuilder.TransformNestedNameSpecifierLoc(QualifierLoc);
- if (!Rebuilt)
+ if (!Rebuilt)
return true;
SS.Adopt(Rebuilt);
@@ -8501,36 +8804,36 @@ bool Sema::RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params) {
for (unsigned I = 0, N = Params->size(); I != N; ++I) {
Decl *Param = Params->getParam(I);
-
+
// There is nothing to rebuild in a type parameter.
if (isa<TemplateTypeParmDecl>(Param))
continue;
-
+
// Rebuild the template parameter list of a template template parameter.
- if (TemplateTemplateParmDecl *TTP
+ if (TemplateTemplateParmDecl *TTP
= dyn_cast<TemplateTemplateParmDecl>(Param)) {
if (RebuildTemplateParamsInCurrentInstantiation(
TTP->getTemplateParameters()))
return true;
-
+
continue;
}
-
+
// Rebuild the type of a non-type template parameter.
NonTypeTemplateParmDecl *NTTP = cast<NonTypeTemplateParmDecl>(Param);
- TypeSourceInfo *NewTSI
- = RebuildTypeInCurrentInstantiation(NTTP->getTypeSourceInfo(),
- NTTP->getLocation(),
+ TypeSourceInfo *NewTSI
+ = RebuildTypeInCurrentInstantiation(NTTP->getTypeSourceInfo(),
+ NTTP->getLocation(),
NTTP->getDeclName());
if (!NewTSI)
return true;
-
+
if (NewTSI != NTTP->getTypeSourceInfo()) {
NTTP->setTypeSourceInfo(NewTSI);
NTTP->setType(NewTSI->getType());
}
}
-
+
return false;
}
@@ -8580,12 +8883,12 @@ void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
if (!FD)
return;
- LateParsedTemplate *LPT = new LateParsedTemplate;
+ auto LPT = llvm::make_unique<LateParsedTemplate>();
// Take tokens to avoid allocations
LPT->Toks.swap(Toks);
LPT->D = FnD;
- LateParsedTemplateMap.insert(std::make_pair(FD, LPT));
+ LateParsedTemplateMap.insert(std::make_pair(FD, std::move(LPT)));
FD->setLateTemplateParsed(true);
}
@@ -8611,6 +8914,7 @@ bool Sema::IsInsideALocalClassWithinATemplateFunction() {
return false;
}
+namespace {
/// \brief Walk the path from which a declaration was instantiated, and check
/// that every explicit specialization along that path is visible. This enforces
/// C++ [temp.expl.spec]/6:
@@ -8738,6 +9042,7 @@ private:
}
}
};
+} // end anonymous namespace
void Sema::checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec) {
if (!getLangOpts().Modules)
diff --git a/lib/Sema/SemaTemplateDeduction.cpp b/lib/Sema/SemaTemplateDeduction.cpp
index 5740bc712e86..0bc85a2f2635 100644
--- a/lib/Sema/SemaTemplateDeduction.cpp
+++ b/lib/Sema/SemaTemplateDeduction.cpp
@@ -100,12 +100,13 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
SmallVectorImpl<DeducedTemplateArgument> &
Deduced,
unsigned TDF,
- bool PartialOrdering = false);
+ bool PartialOrdering = false,
+ bool DeducedFromArrayBound = false);
static Sema::TemplateDeductionResult
DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
- const TemplateArgument *Params, unsigned NumParams,
- const TemplateArgument *Args, unsigned NumArgs,
+ ArrayRef<TemplateArgument> Params,
+ ArrayRef<TemplateArgument> Args,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
bool NumberOfArgumentsMustMatch);
@@ -113,7 +114,8 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
/// \brief If the given expression is of a form that permits the deduction
/// of a non-type template parameter, return the declaration of that
/// non-type template parameter.
-static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
+static NonTypeTemplateParmDecl *
+getDeducedParameterFromExpr(TemplateDeductionInfo &Info, Expr *E) {
// If we are within an alias template, the expression may have undergone
// any number of parameter substitutions already.
while (1) {
@@ -127,7 +129,9 @@ static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
}
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
- return dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl()))
+ if (NTTP->getDepth() == Info.getDeducedDepth())
+ return NTTP;
return nullptr;
}
@@ -157,6 +161,20 @@ checkDeducedTemplateArguments(ASTContext &Context,
if (Y.isNull())
return X;
+ // If we have two non-type template argument values deduced for the same
+ // parameter, they must both match the type of the parameter, and thus must
+ // match each other's type. As we're only keeping one of them, we must check
+ // for that now. The exception is that if either was deduced from an array
+ // bound, the type is permitted to differ.
+ if (!X.wasDeducedFromArrayBound() && !Y.wasDeducedFromArrayBound()) {
+ QualType XType = X.getNonTypeTemplateArgumentType();
+ if (!XType.isNull()) {
+ QualType YType = Y.getNonTypeTemplateArgumentType();
+ if (YType.isNull() || !Context.hasSameType(XType, YType))
+ return DeducedTemplateArgument();
+ }
+ }
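A hedged C++1z sketch of what the new type-consistency check rejects: two deductions for the same parameter that agree in value but not in type.

    template<auto A, auto B> struct P {};
    template<auto V> void f(P<V, V>);
    void g() { f(P<1, '\1'>{}); } // V is deduced as 1 from both positions,
                                  // but with types int and char, so the
                                  // deductions are inconsistent
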
+
switch (X.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Non-deduced template arguments handled above");
@@ -167,6 +185,12 @@ checkDeducedTemplateArguments(ASTContext &Context,
Context.hasSameType(X.getAsType(), Y.getAsType()))
return X;
+ // If one of the two arguments was deduced from an array bound, the other
+ // supersedes it.
+ if (X.wasDeducedFromArrayBound() != Y.wasDeducedFromArrayBound())
+ return X.wasDeducedFromArrayBound() ? Y : X;
+
+ // The arguments are not compatible.
return DeducedTemplateArgument();
case TemplateArgument::Integral:
@@ -177,9 +201,7 @@ checkDeducedTemplateArguments(ASTContext &Context,
Y.getKind() == TemplateArgument::Declaration ||
(Y.getKind() == TemplateArgument::Integral &&
hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral())))
- return DeducedTemplateArgument(X,
- X.wasDeducedFromArrayBound() &&
- Y.wasDeducedFromArrayBound());
+ return X.wasDeducedFromArrayBound() ? Y : X;
// All other combinations are incompatible.
return DeducedTemplateArgument();
@@ -201,37 +223,38 @@ checkDeducedTemplateArguments(ASTContext &Context,
// All other combinations are incompatible.
return DeducedTemplateArgument();
- case TemplateArgument::Expression:
- // If we deduced a dependent expression in one case and either an integral
- // constant or a declaration in another case, keep the integral constant
- // or declaration.
- if (Y.getKind() == TemplateArgument::Integral ||
- Y.getKind() == TemplateArgument::Declaration)
- return DeducedTemplateArgument(Y, X.wasDeducedFromArrayBound() &&
- Y.wasDeducedFromArrayBound());
-
- if (Y.getKind() == TemplateArgument::Expression) {
- // Compare the expressions for equality
- llvm::FoldingSetNodeID ID1, ID2;
- X.getAsExpr()->Profile(ID1, Context, true);
- Y.getAsExpr()->Profile(ID2, Context, true);
- if (ID1 == ID2)
- return X;
- }
+ case TemplateArgument::Expression: {
+ if (Y.getKind() != TemplateArgument::Expression)
+ return checkDeducedTemplateArguments(Context, Y, X);
- // All other combinations are incompatible.
+ // Compare the expressions for equality
+ llvm::FoldingSetNodeID ID1, ID2;
+ X.getAsExpr()->Profile(ID1, Context, true);
+ Y.getAsExpr()->Profile(ID2, Context, true);
+ if (ID1 == ID2)
+ return X.wasDeducedFromArrayBound() ? Y : X;
+
+ // Differing dependent expressions are incompatible.
return DeducedTemplateArgument();
+ }
case TemplateArgument::Declaration:
+ assert(!X.wasDeducedFromArrayBound());
+
// If we deduced a declaration and a dependent expression, keep the
// declaration.
if (Y.getKind() == TemplateArgument::Expression)
return X;
// If we deduced a declaration and an integral constant, keep the
- // integral constant.
- if (Y.getKind() == TemplateArgument::Integral)
+      // integral constant, using whichever type did not come from an array
+ // bound.
+ if (Y.getKind() == TemplateArgument::Integral) {
+ if (Y.wasDeducedFromArrayBound())
+ return TemplateArgument(Context, Y.getAsIntegral(),
+ X.getParamTypeForDecl());
return Y;
+ }
     // If we deduced two declarations, make sure they refer to the
// same declaration.
@@ -253,9 +276,8 @@ checkDeducedTemplateArguments(ASTContext &Context,
if (Y.getKind() == TemplateArgument::Integral)
return Y;
- // If we deduced two null pointers, make sure they have the same type.
- if (Y.getKind() == TemplateArgument::NullPtr &&
- Context.hasSameType(X.getNullPtrType(), Y.getNullPtrType()))
+ // If we deduced two null pointers, they are the same.
+ if (Y.getKind() == TemplateArgument::NullPtr)
return X;
// All other combinations are incompatible.
@@ -285,19 +307,18 @@ checkDeducedTemplateArguments(ASTContext &Context,
}
/// \brief Deduce the value of the given non-type template parameter
-/// from the given constant.
+/// as the given deduced template argument. All non-type template parameter
+/// deduction is funneled through here.
static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
- Sema &S, NonTypeTemplateParmDecl *NTTP, const llvm::APSInt &Value,
- QualType ValueType, bool DeducedFromArrayBound, TemplateDeductionInfo &Info,
+ Sema &S, TemplateParameterList *TemplateParams,
+ NonTypeTemplateParmDecl *NTTP, const DeducedTemplateArgument &NewDeduced,
+ QualType ValueType, TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- assert(NTTP->getDepth() == 0 &&
- "Cannot deduce non-type template argument with depth > 0");
-
- DeducedTemplateArgument NewDeduced(S.Context, Value, ValueType,
- DeducedFromArrayBound);
- DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
- Deduced[NTTP->getIndex()],
- NewDeduced);
+ assert(NTTP->getDepth() == Info.getDeducedDepth() &&
+ "deducing non-type template argument with wrong depth");
+
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(
+ S.Context, Deduced[NTTP->getIndex()], NewDeduced);
if (Result.isNull()) {
Info.Param = NTTP;
Info.FirstArg = Deduced[NTTP->getIndex()];
@@ -306,68 +327,77 @@ static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
}
Deduced[NTTP->getIndex()] = Result;
- return Sema::TDK_Success;
+ if (!S.getLangOpts().CPlusPlus1z)
+ return Sema::TDK_Success;
+
+ // FIXME: It's not clear how deduction of a parameter of reference
+ // type from an argument (of non-reference type) should be performed.
+ // For now, we just remove reference types from both sides and let
+ // the final check for matching types sort out the mess.
+ return DeduceTemplateArgumentsByTypeMatch(
+ S, TemplateParams, NTTP->getType().getNonReferenceType(),
+ ValueType.getNonReferenceType(), Info, Deduced, TDF_SkipNonDependent,
+ /*PartialOrdering=*/false,
+ /*ArrayBound=*/NewDeduced.wasDeducedFromArrayBound());
+}
+
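Because every non-type deduction now funnels through the function above, a value deduction can drive type deduction too (C++1z sketch):

    template<auto N> struct A {};
    template<auto N> void f(A<N>);
    void g() { f(A<5>()); } // deducing N = 5 also deduces its type as int
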
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given integral constant.
+static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
+ Sema &S, TemplateParameterList *TemplateParams,
+ NonTypeTemplateParmDecl *NTTP, const llvm::APSInt &Value,
+ QualType ValueType, bool DeducedFromArrayBound, TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP,
+ DeducedTemplateArgument(S.Context, Value, ValueType,
+ DeducedFromArrayBound),
+ ValueType, Info, Deduced);
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given null pointer template argument type.
+static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
+ Sema &S, TemplateParameterList *TemplateParams,
+ NonTypeTemplateParmDecl *NTTP, QualType NullPtrType,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ Expr *Value =
+ S.ImpCastExprToType(new (S.Context) CXXNullPtrLiteralExpr(
+ S.Context.NullPtrTy, NTTP->getLocation()),
+ NullPtrType, CK_NullToPointer)
+ .get();
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
+ DeducedTemplateArgument(Value),
+ Value->getType(), Info, Deduced);
}
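A small sketch for the new null-pointer case: deducing a pointer-typed parameter from a nullptr template argument produces a null value of the parameter's type.

    template<int *P> struct N {};
    template<int *P> void h(N<P>);
    void g() { h(N<nullptr>()); } // P is deduced as a null int* value
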
/// \brief Deduce the value of the given non-type template parameter
/// from the given type- or value-dependent expression.
///
/// \returns true if deduction succeeded, false otherwise.
-static Sema::TemplateDeductionResult
-DeduceNonTypeTemplateArgument(Sema &S,
- NonTypeTemplateParmDecl *NTTP,
- Expr *Value,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- assert(NTTP->getDepth() == 0 &&
- "Cannot deduce non-type template argument with depth > 0");
- assert((Value->isTypeDependent() || Value->isValueDependent()) &&
- "Expression template argument must be type- or value-dependent.");
-
- DeducedTemplateArgument NewDeduced(Value);
- DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
- Deduced[NTTP->getIndex()],
- NewDeduced);
-
- if (Result.isNull()) {
- Info.Param = NTTP;
- Info.FirstArg = Deduced[NTTP->getIndex()];
- Info.SecondArg = NewDeduced;
- return Sema::TDK_Inconsistent;
- }
-
- Deduced[NTTP->getIndex()] = Result;
- return Sema::TDK_Success;
+static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
+ Sema &S, TemplateParameterList *TemplateParams,
+ NonTypeTemplateParmDecl *NTTP, Expr *Value, TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
+ DeducedTemplateArgument(Value),
+ Value->getType(), Info, Deduced);
}
/// \brief Deduce the value of the given non-type template parameter
/// from the given declaration.
///
/// \returns true if deduction succeeded, false otherwise.
-static Sema::TemplateDeductionResult
-DeduceNonTypeTemplateArgument(Sema &S,
- NonTypeTemplateParmDecl *NTTP,
- ValueDecl *D,
- TemplateDeductionInfo &Info,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- assert(NTTP->getDepth() == 0 &&
- "Cannot deduce non-type template argument with depth > 0");
-
+static Sema::TemplateDeductionResult DeduceNonTypeTemplateArgument(
+ Sema &S, TemplateParameterList *TemplateParams,
+ NonTypeTemplateParmDecl *NTTP, ValueDecl *D, QualType T,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
D = D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- TemplateArgument New(D, NTTP->getType());
- DeducedTemplateArgument NewDeduced(New);
- DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
- Deduced[NTTP->getIndex()],
- NewDeduced);
- if (Result.isNull()) {
- Info.Param = NTTP;
- Info.FirstArg = Deduced[NTTP->getIndex()];
- Info.SecondArg = NewDeduced;
- return Sema::TDK_Inconsistent;
- }
-
- Deduced[NTTP->getIndex()] = Result;
- return Sema::TDK_Success;
+ TemplateArgument New(D, T);
+ return DeduceNonTypeTemplateArgument(
+ S, TemplateParams, NTTP, DeducedTemplateArgument(New), T, Info, Deduced);
}
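// --- Editor's illustration (hypothetical names; not part of the patch) ---
// A minimal sketch of the argument kinds the overloads above deduce from:
//   template <int N>  struct Holder {};
//   template <int *P> struct PtrHolder {};
//   extern int g;
//   template <class T> struct S;
//   template <int N>  struct S<Holder<N>> {};
//   template <int *P> struct S<PtrHolder<P>> {};
// Matching S<Holder<3>> exercises the integral-constant overload;
// S<PtrHolder<&g>> the declaration overload; and S<PtrHolder<nullptr>>
// the new null-pointer overload added above.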
static Sema::TemplateDeductionResult
@@ -386,6 +416,10 @@ DeduceTemplateArguments(Sema &S,
if (TemplateTemplateParmDecl *TempParam
= dyn_cast<TemplateTemplateParmDecl>(ParamDecl)) {
+ // If we're not deducing at this depth, there's nothing to deduce.
+ if (TempParam->getDepth() != Info.getDeducedDepth())
+ return Sema::TDK_Success;
+
DeducedTemplateArgument NewDeduced(S.Context.getCanonicalTemplateName(Arg));
DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
Deduced[TempParam->getIndex()],
@@ -453,9 +487,9 @@ DeduceTemplateArguments(Sema &S,
// Perform template argument deduction on each template
// argument. Ignore any missing/extra arguments, since they could be
// filled in by default arguments.
- return DeduceTemplateArguments(S, TemplateParams, Param->getArgs(),
- Param->getNumArgs(), SpecArg->getArgs(),
- SpecArg->getNumArgs(), Info, Deduced,
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param->template_arguments(),
+ SpecArg->template_arguments(), Info, Deduced,
/*NumberOfArgumentsMustMatch=*/false);
}
@@ -487,10 +521,9 @@ DeduceTemplateArguments(Sema &S,
return Result;
// Perform template argument deduction for the template arguments.
- return DeduceTemplateArguments(
- S, TemplateParams, Param->getArgs(), Param->getNumArgs(),
- SpecArg->getTemplateArgs().data(), SpecArg->getTemplateArgs().size(),
- Info, Deduced, /*NumberOfArgumentsMustMatch=*/true);
+ return DeduceTemplateArguments(S, TemplateParams, Param->template_arguments(),
+ SpecArg->getTemplateArgs().asArray(), Info,
+ Deduced, /*NumberOfArgumentsMustMatch=*/true);
}
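// --- Editor's illustration (hypothetical names; not part of the patch) ---
// The two calls above walk written template argument lists element-wise:
//   template <class T, class U = int> struct Pair {};
//   template <class T> struct Traits;
//   template <class T, class U> struct Traits<Pair<T, U>> {};
// When the argument side is still dependent and spelled Pair<T>, the
// trailing U is absent from the written list; deduction tolerates that
// (NumberOfArgumentsMustMatch = false) since a default argument could
// supply it. Against a fully formed Pair<float, int>, the converted
// argument list is complete and the lengths must match.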
/// \brief Determines whether the given type is an opaque type that
@@ -589,7 +622,7 @@ public:
for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
unsigned Depth, Index;
std::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
- if (Depth == 0 && !SawIndices[Index]) {
+ if (Depth == Info.getDeducedDepth() && !SawIndices[Index]) {
SawIndices[Index] = true;
// Save the deduced template argument for the parameter pack expanded
@@ -620,7 +653,8 @@ public:
S.CurrentInstantiationScope->getPartiallySubstitutedPack(
&ExplicitArgs, &NumExplicitArgs);
if (PartiallySubstitutedPack &&
- getDepthAndIndex(PartiallySubstitutedPack).second == Pack.Index)
+ getDepthAndIndex(PartiallySubstitutedPack) ==
+ std::make_pair(Info.getDeducedDepth(), Pack.Index))
Pack.New.append(ExplicitArgs, ExplicitArgs + NumExplicitArgs);
}
}
@@ -863,12 +897,12 @@ static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
if (ParamQs == ArgQs)
return false;
-
+
// Mismatched (but not missing) Objective-C GC attributes.
- if (ParamQs.getObjCGCAttr() != ArgQs.getObjCGCAttr() &&
+ if (ParamQs.getObjCGCAttr() != ArgQs.getObjCGCAttr() &&
ParamQs.hasObjCGCAttr())
return true;
-
+
// Mismatched (but not missing) address spaces.
if (ParamQs.getAddressSpace() != ArgQs.getAddressSpace() &&
ParamQs.hasAddressSpace())
@@ -878,7 +912,7 @@ static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
if (ParamQs.getObjCLifetime() != ArgQs.getObjCLifetime() &&
ParamQs.hasObjCLifetime())
return true;
-
+
// CVR qualifier superset.
return (ParamQs.getCVRQualifiers() != ArgQs.getCVRQualifiers()) &&
((ParamQs.getCVRQualifiers() | ArgQs.getCVRQualifiers())
@@ -901,9 +935,9 @@ bool Sema::isSameOrCompatibleFunctionType(CanQualType Param,
if (!ParamFunction || !ArgFunction)
return Param == Arg;
- // Noreturn adjustment.
+ // Noreturn and noexcept adjustment.
QualType AdjustedParam;
- if (IsNoReturnConversion(Param, Arg, AdjustedParam))
+ if (IsFunctionConversion(Param, Arg, AdjustedParam))
return Arg == Context.getCanonicalType(AdjustedParam);
// FIXME: Compatible calling conventions.
@@ -942,7 +976,8 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned TDF,
- bool PartialOrdering) {
+ bool PartialOrdering,
+ bool DeducedFromArrayBound) {
// We only want to look at the canonical types, since typedefs and
// sugar are not part of template argument deduction.
QualType Param = S.Context.getCanonicalType(ParamIn);
@@ -1057,10 +1092,12 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// cv-list T
if (const TemplateTypeParmType *TemplateTypeParm
= Param->getAs<TemplateTypeParmType>()) {
- // Just skip any attempts to deduce from a placeholder type.
- if (Arg->isPlaceholderType())
+ // Just skip any attempts to deduce from a placeholder type or a parameter
+ // at a different depth.
+ if (Arg->isPlaceholderType() ||
+ Info.getDeducedDepth() != TemplateTypeParm->getDepth())
return Sema::TDK_Success;
-
+
unsigned Index = TemplateTypeParm->getIndex();
bool RecanonicalizeArg = false;
@@ -1085,7 +1122,8 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return Sema::TDK_Underqualified;
}
- assert(TemplateTypeParm->getDepth() == 0 && "Can't deduce with depth > 0");
+ assert(TemplateTypeParm->getDepth() == Info.getDeducedDepth() &&
+ "saw template type parameter with wrong depth");
assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function");
QualType DeducedType = Arg;
@@ -1100,7 +1138,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
DeducedQs.removeAddressSpace();
if (ParamQs.hasObjCLifetime())
DeducedQs.removeObjCLifetime();
-
+
// Objective-C ARC:
// If template deduction would produce a lifetime qualifier on a type
// that is not a lifetime type, template argument deduction fails.
@@ -1109,9 +1147,9 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
Info.FirstArg = TemplateArgument(Param);
Info.SecondArg = TemplateArgument(Arg);
- return Sema::TDK_Underqualified;
+ return Sema::TDK_Underqualified;
}
-
+
// Objective-C ARC:
// If template deduction would produce an argument type with lifetime type
// but no lifetime qualifier, the __strong lifetime qualifier is inferred.
@@ -1119,14 +1157,14 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
DeducedType->isObjCLifetimeType() &&
!DeducedQs.hasObjCLifetime())
DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong);
-
+
DeducedType = S.Context.getQualifiedType(DeducedType.getUnqualifiedType(),
DeducedQs);
-
+
if (RecanonicalizeArg)
DeducedType = S.Context.getCanonicalType(DeducedType);
- DeducedTemplateArgument NewDeduced(DeducedType);
+ DeducedTemplateArgument NewDeduced(DeducedType, DeducedFromArrayBound);
DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
Deduced[Index],
NewDeduced);
@@ -1163,7 +1201,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
return Sema::TDK_NonDeducedMismatch;
}
-
+
// If the parameter type is not dependent, there is nothing to deduce.
if (!Param->isDependentType()) {
if (!(TDF & TDF_SkipNonDependent)) {
@@ -1193,7 +1231,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::Class: llvm_unreachable("deducing non-canonical type: " #Class);
#define TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
-
+
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
llvm_unreachable("Type nodes handled above");
@@ -1211,20 +1249,20 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
case Type::ObjCObjectPointer: {
if (TDF & TDF_SkipNonDependent)
return Sema::TDK_Success;
-
+
if (TDF & TDF_IgnoreQualifiers) {
Param = Param.getUnqualifiedType();
Arg = Arg.getUnqualifiedType();
}
-
+
return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch;
}
-
- // _Complex T [placeholder extension]
+
+ // _Complex T [placeholder extension]
case Type::Complex:
if (const ComplexType *ComplexArg = Arg->getAs<ComplexType>())
- return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
- cast<ComplexType>(Param)->getElementType(),
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<ComplexType>(Param)->getElementType(),
ComplexArg->getElementType(),
Info, Deduced, TDF);
@@ -1337,18 +1375,18 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// Determine whether the array bound is something we can deduce.
NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(DependentArrayParm->getSizeExpr());
+ = getDeducedParameterFromExpr(Info, DependentArrayParm->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
// We can perform template argument deduction for the given non-type
// template parameter.
- assert(NTTP->getDepth() == 0 &&
- "Cannot deduce non-type template argument at depth > 0");
+ assert(NTTP->getDepth() == Info.getDeducedDepth() &&
+ "saw non-type template parameter with wrong depth");
if (const ConstantArrayType *ConstantArrayArg
= dyn_cast<ConstantArrayType>(ArrayArg)) {
llvm::APSInt Size(ConstantArrayArg->getSize());
- return DeduceNonTypeTemplateArgument(S, NTTP, Size,
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, Size,
S.Context.getSizeType(),
/*ArrayBound=*/true,
Info, Deduced);
@@ -1356,7 +1394,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
if (const DependentSizedArrayType *DependentArrayArg
= dyn_cast<DependentSizedArrayType>(ArrayArg))
if (DependentArrayArg->getSizeExpr())
- return DeduceNonTypeTemplateArgument(S, NTTP,
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
DependentArrayArg->getSizeExpr(),
Info, Deduced);
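// --- Editor's illustration (hypothetical names; not part of the patch) ---
// Array-bound deduction handled just above, in source form:
//   template <int N> void takesArray(int (&)[N]);
//   int arr[4];
// takesArray(arr) deduces N = 4 from the constant bound, marked
// DeducedFromArrayBound = true and typed provisionally as size_t. When
// the argument's bound is itself dependent (e.g. int (&)[M] during
// partial ordering), the size expression M reaches the expression
// overload instead.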
@@ -1549,7 +1587,7 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
QualType(MemPtrParam->getClass(), 0),
QualType(MemPtrArg->getClass(), 0),
- Info, Deduced,
+ Info, Deduced,
TDF & TDF_IgnoreQualifiers);
}
@@ -1580,15 +1618,15 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
// Make sure that the vectors have the same number of elements.
if (VectorParam->getNumElements() != VectorArg->getNumElements())
return Sema::TDK_NonDeducedMismatch;
-
+
// Perform deduction on the element types.
return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
VectorParam->getElementType(),
VectorArg->getElementType(),
Info, Deduced, TDF);
}
-
- if (const DependentSizedExtVectorType *VectorArg
+
+ if (const DependentSizedExtVectorType *VectorArg
= dyn_cast<DependentSizedExtVectorType>(Arg)) {
// We can't check the number of elements, since the argument has a
// dependent number of elements. This can only occur during partial
@@ -1600,10 +1638,10 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
VectorArg->getElementType(),
Info, Deduced, TDF);
}
-
+
return Sema::TDK_NonDeducedMismatch;
}
-
+
// (clang extension)
//
// T __attribute__(((ext_vector_type(N))))
@@ -1619,20 +1657,24 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
VectorArg->getElementType(),
Info, Deduced, TDF))
return Result;
-
+
// Perform deduction on the vector size, if we can.
NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ = getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
ArgSize = VectorArg->getNumElements();
- return DeduceNonTypeTemplateArgument(S, NTTP, ArgSize, S.Context.IntTy,
- false, Info, Deduced);
+ // Note that we use the "array bound" rules here; just like in that
+ // case, we don't have any particular type for the vector size, but
+ // we can provide one if necessary.
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP, ArgSize,
+ S.Context.IntTy, true, Info,
+ Deduced);
}
-
- if (const DependentSizedExtVectorType *VectorArg
+
+ if (const DependentSizedExtVectorType *VectorArg
= dyn_cast<DependentSizedExtVectorType>(Arg)) {
// Perform deduction on the element types.
if (Sema::TemplateDeductionResult Result
@@ -1641,20 +1683,21 @@ DeduceTemplateArgumentsByTypeMatch(Sema &S,
VectorArg->getElementType(),
Info, Deduced, TDF))
return Result;
-
+
// Perform deduction on the vector size, if we can.
NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ = getDeducedParameterFromExpr(Info, VectorParam->getSizeExpr());
if (!NTTP)
return Sema::TDK_Success;
-
- return DeduceNonTypeTemplateArgument(S, NTTP, VectorArg->getSizeExpr(),
+
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
+ VectorArg->getSizeExpr(),
Info, Deduced);
}
-
+
return Sema::TDK_NonDeducedMismatch;
}
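// --- Editor's illustration (hypothetical names; not part of the patch) ---
// The ext_vector_type extension handled above:
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   template <class T, unsigned N>
//   using vec = T __attribute__((ext_vector_type(N)));
//   template <class T, unsigned N> void takesVec(vec<T, N>);
//   float4 v;
// takesVec(v) deduces T = float and, since the element count carries no
// written type, N = 4 via the "array bound" style rules noted above.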
-
+
case Type::TypeOfExpr:
case Type::TypeOf:
case Type::DependentName:
@@ -1751,18 +1794,24 @@ DeduceTemplateArguments(Sema &S,
case TemplateArgument::Expression: {
if (NonTypeTemplateParmDecl *NTTP
- = getDeducedParameterFromExpr(Param.getAsExpr())) {
+ = getDeducedParameterFromExpr(Info, Param.getAsExpr())) {
if (Arg.getKind() == TemplateArgument::Integral)
- return DeduceNonTypeTemplateArgument(S, NTTP,
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
Arg.getAsIntegral(),
Arg.getIntegralType(),
/*ArrayBound=*/false,
Info, Deduced);
- if (Arg.getKind() == TemplateArgument::Expression)
- return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsExpr(),
+ if (Arg.getKind() == TemplateArgument::NullPtr)
+ return DeduceNullPtrTemplateArgument(S, TemplateParams, NTTP,
+ Arg.getNullPtrType(),
Info, Deduced);
+ if (Arg.getKind() == TemplateArgument::Expression)
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
+ Arg.getAsExpr(), Info, Deduced);
if (Arg.getKind() == TemplateArgument::Declaration)
- return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsDecl(),
+ return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
+ Arg.getAsDecl(),
+ Arg.getParamTypeForDecl(),
Info, Deduced);
Info.FirstArg = Param;
@@ -1788,45 +1837,34 @@ DeduceTemplateArguments(Sema &S,
///
/// \returns true if there is another template argument (which will be at
/// \c Args[ArgIdx]), false otherwise.
-static bool hasTemplateArgumentForDeduction(const TemplateArgument *&Args,
- unsigned &ArgIdx,
- unsigned &NumArgs) {
- if (ArgIdx == NumArgs)
+static bool hasTemplateArgumentForDeduction(ArrayRef<TemplateArgument> &Args,
+ unsigned &ArgIdx) {
+ if (ArgIdx == Args.size())
return false;
const TemplateArgument &Arg = Args[ArgIdx];
if (Arg.getKind() != TemplateArgument::Pack)
return true;
- assert(ArgIdx == NumArgs - 1 && "Pack not at the end of argument list?");
- Args = Arg.pack_begin();
- NumArgs = Arg.pack_size();
+ assert(ArgIdx == Args.size() - 1 && "Pack not at the end of argument list?");
+ Args = Arg.pack_elements();
ArgIdx = 0;
- return ArgIdx < NumArgs;
+ return ArgIdx < Args.size();
}
/// \brief Determine whether the given set of template arguments has a pack
/// expansion that is not the last template argument.
-static bool hasPackExpansionBeforeEnd(const TemplateArgument *Args,
- unsigned NumArgs) {
- unsigned ArgIdx = 0;
- while (ArgIdx < NumArgs) {
- const TemplateArgument &Arg = Args[ArgIdx];
-
- // Unwrap argument packs.
- if (Args[ArgIdx].getKind() == TemplateArgument::Pack) {
- Args = Arg.pack_begin();
- NumArgs = Arg.pack_size();
- ArgIdx = 0;
- continue;
- }
+static bool hasPackExpansionBeforeEnd(ArrayRef<TemplateArgument> Args) {
+ bool FoundPackExpansion = false;
+ for (const auto &A : Args) {
+ if (FoundPackExpansion)
+ return true;
- ++ArgIdx;
- if (ArgIdx == NumArgs)
- return false;
+ if (A.getKind() == TemplateArgument::Pack)
+ return hasPackExpansionBeforeEnd(A.pack_elements());
- if (Arg.isPackExpansion())
- return true;
+ if (A.isPackExpansion())
+ FoundPackExpansion = true;
}
return false;
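// --- Editor's illustration (not part of the patch) ---
// What the rewritten check above detects:
//   template <class... Ts> struct List {};
//   // hypothetical parameter type: List<Ts..., U>
// The expansion Ts... precedes U, so per C++ [temp.deduct.type]p9 the
// entire argument list is a non-deduced context: the caller returns
// TDK_Success without recording any deductions (which is also why a
// partial specialization spelled that way is rejected elsewhere -- Ts
// and U could never be deduced from it).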
@@ -1834,8 +1872,8 @@ static bool hasPackExpansionBeforeEnd(const TemplateArgument *Args,
static Sema::TemplateDeductionResult
DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
- const TemplateArgument *Params, unsigned NumParams,
- const TemplateArgument *Args, unsigned NumArgs,
+ ArrayRef<TemplateArgument> Params,
+ ArrayRef<TemplateArgument> Args,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
bool NumberOfArgumentsMustMatch) {
@@ -1843,7 +1881,7 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// If the template argument list of P contains a pack expansion that is not
// the last template argument, the entire template argument list is a
// non-deduced context.
- if (hasPackExpansionBeforeEnd(Params, NumParams))
+ if (hasPackExpansionBeforeEnd(Params))
return Sema::TDK_Success;
// C++0x [temp.deduct.type]p9:
@@ -1851,21 +1889,20 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// respective template argument list P is compared with the corresponding
// argument Ai of the corresponding template argument list of A.
unsigned ArgIdx = 0, ParamIdx = 0;
- for (; hasTemplateArgumentForDeduction(Params, ParamIdx, NumParams);
- ++ParamIdx) {
+ for (; hasTemplateArgumentForDeduction(Params, ParamIdx); ++ParamIdx) {
if (!Params[ParamIdx].isPackExpansion()) {
// The simple case: deduce template arguments by matching Pi and Ai.
// Check whether we have enough arguments.
- if (!hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs))
+ if (!hasTemplateArgumentForDeduction(Args, ArgIdx))
return NumberOfArgumentsMustMatch ? Sema::TDK_TooFewArguments
: Sema::TDK_Success;
- if (Args[ArgIdx].isPackExpansion()) {
- // FIXME: We follow the logic of C++0x [temp.deduct.type]p22 here,
- // but applied to pack expansions that are template arguments.
+ // C++1z [temp.deduct.type]p9:
+ // During partial ordering, if Ai was originally a pack expansion [and]
+ // Pi is not a pack expansion, template argument deduction fails.
+ if (Args[ArgIdx].isPackExpansion())
return Sema::TDK_MiscellaneousDeductionFailure;
- }
// Perform deduction for this Pi/Ai pair.
if (Sema::TemplateDeductionResult Result
@@ -1899,7 +1936,7 @@ DeduceTemplateArguments(Sema &S, TemplateParameterList *TemplateParams,
// expanded by this pack expansion (the outer index) and for each
// template argument (the inner SmallVectors).
bool HasAnyArguments = false;
- for (; hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs); ++ArgIdx) {
+ for (; hasTemplateArgumentForDeduction(Args, ArgIdx); ++ArgIdx) {
HasAnyArguments = true;
// Deduce template arguments from the pattern.
@@ -1927,16 +1964,21 @@ DeduceTemplateArguments(Sema &S,
const TemplateArgumentList &ArgList,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- return DeduceTemplateArguments(S, TemplateParams,
- ParamList.data(), ParamList.size(),
- ArgList.data(), ArgList.size(),
- Info, Deduced, false);
+ return DeduceTemplateArguments(S, TemplateParams, ParamList.asArray(),
+ ArgList.asArray(), Info, Deduced,
+ /*NumberOfArgumentsMustMatch*/false);
}
/// \brief Determine whether two template arguments are the same.
static bool isSameTemplateArg(ASTContext &Context,
- const TemplateArgument &X,
- const TemplateArgument &Y) {
+ TemplateArgument X,
+ const TemplateArgument &Y,
+ bool PackExpansionMatchesPack = false) {
+ // If we're checking deduced arguments (X) against original arguments (Y),
+ // we will have flattened packs to non-expansions in X.
+ if (PackExpansionMatchesPack && X.isPackExpansion() && !Y.isPackExpansion())
+ X = X.getPackExpansionPattern();
+
if (X.getKind() != Y.getKind())
return false;
@@ -1962,7 +2004,7 @@ static bool isSameTemplateArg(ASTContext &Context,
Y.getAsTemplateOrTemplatePattern()).getAsVoidPointer();
case TemplateArgument::Integral:
- return X.getAsIntegral() == Y.getAsIntegral();
+ return hasSameExtendedValue(X.getAsIntegral(), Y.getAsIntegral());
case TemplateArgument::Expression: {
llvm::FoldingSetNodeID XID, YID;
@@ -1979,7 +2021,7 @@ static bool isSameTemplateArg(ASTContext &Context,
XPEnd = X.pack_end(),
YP = Y.pack_begin();
XP != XPEnd; ++XP, ++YP)
- if (!isSameTemplateArg(Context, *XP, *YP))
+ if (!isSameTemplateArg(Context, *XP, *YP, PackExpansionMatchesPack))
return false;
return true;
@@ -1991,48 +2033,47 @@ static bool isSameTemplateArg(ASTContext &Context,
/// \brief Allocate a TemplateArgumentLoc where all locations have
/// been initialized to the given location.
///
-/// \param S The semantic analysis object.
-///
/// \param Arg The template argument we are producing template argument
/// location information for.
///
/// \param NTTPType For a declaration template argument, the type of
/// the non-type template parameter that corresponds to this template
-/// argument.
+/// argument. Can be null if no type sugar is available to add to the
+/// type from the template argument.
///
/// \param Loc The source location to use for the resulting template
/// argument.
-static TemplateArgumentLoc
-getTrivialTemplateArgumentLoc(Sema &S,
- const TemplateArgument &Arg,
- QualType NTTPType,
- SourceLocation Loc) {
+TemplateArgumentLoc
+Sema::getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
+ QualType NTTPType, SourceLocation Loc) {
switch (Arg.getKind()) {
case TemplateArgument::Null:
llvm_unreachable("Can't get a NULL template argument here");
case TemplateArgument::Type:
- return TemplateArgumentLoc(Arg,
- S.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
+ return TemplateArgumentLoc(
+ Arg, Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
case TemplateArgument::Declaration: {
- Expr *E
- = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
- .getAs<Expr>();
+ if (NTTPType.isNull())
+ NTTPType = Arg.getParamTypeForDecl();
+ Expr *E = BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ .getAs<Expr>();
return TemplateArgumentLoc(TemplateArgument(E), E);
}
case TemplateArgument::NullPtr: {
- Expr *E
- = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
- .getAs<Expr>();
+ if (NTTPType.isNull())
+ NTTPType = Arg.getNullPtrType();
+ Expr *E = BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ .getAs<Expr>();
return TemplateArgumentLoc(TemplateArgument(NTTPType, /*isNullPtr*/true),
E);
}
case TemplateArgument::Integral: {
- Expr *E
- = S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).getAs<Expr>();
+ Expr *E =
+ BuildExpressionFromIntegralTemplateArgument(Arg, Loc).getAs<Expr>();
return TemplateArgumentLoc(TemplateArgument(E), E);
}
@@ -2041,18 +2082,16 @@ getTrivialTemplateArgumentLoc(Sema &S,
NestedNameSpecifierLocBuilder Builder;
TemplateName Template = Arg.getAsTemplate();
if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
- Builder.MakeTrivial(S.Context, DTN->getQualifier(), Loc);
+ Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
else if (QualifiedTemplateName *QTN =
Template.getAsQualifiedTemplateName())
- Builder.MakeTrivial(S.Context, QTN->getQualifier(), Loc);
-
+ Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
+
if (Arg.getKind() == TemplateArgument::Template)
- return TemplateArgumentLoc(Arg,
- Builder.getWithLocInContext(S.Context),
+ return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(Context),
Loc);
-
-
- return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(S.Context),
+
+ return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(Context),
Loc, Loc);
}
@@ -2074,39 +2113,21 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
DeducedTemplateArgument Arg,
NamedDecl *Template,
TemplateDeductionInfo &Info,
- bool InFunctionTemplate,
+ bool IsDeduced,
SmallVectorImpl<TemplateArgument> &Output) {
- // First, for a non-type template parameter type that is
- // initialized by a declaration, we need the type of the
- // corresponding non-type template parameter.
- QualType NTTPType;
- if (NonTypeTemplateParmDecl *NTTP =
- dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- NTTPType = NTTP->getType();
- if (NTTPType->isDependentType()) {
- TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Output);
- NTTPType = S.SubstType(NTTPType,
- MultiLevelTemplateArgumentList(TemplateArgs),
- NTTP->getLocation(),
- NTTP->getDeclName());
- if (NTTPType.isNull())
- return true;
- }
- }
-
auto ConvertArg = [&](DeducedTemplateArgument Arg,
unsigned ArgumentPackIndex) {
// Convert the deduced template argument into a template
// argument that we can check, almost as if the user had written
// the template argument explicitly.
TemplateArgumentLoc ArgLoc =
- getTrivialTemplateArgumentLoc(S, Arg, NTTPType, Info.getLocation());
+ S.getTrivialTemplateArgumentLoc(Arg, QualType(), Info.getLocation());
// Check the template argument, converting it as necessary.
return S.CheckTemplateArgument(
Param, ArgLoc, Template, Template->getLocation(),
Template->getSourceRange().getEnd(), ArgumentPackIndex, Output,
- InFunctionTemplate
+ IsDeduced
? (Arg.wasDeducedFromArrayBound() ? Sema::CTAK_DeducedFromArrayBound
: Sema::CTAK_Deduced)
: Sema::CTAK_Specified);
@@ -2132,22 +2153,28 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
}
// If the pack is empty, we still need to substitute into the parameter
- // itself, in case that substitution fails. For non-type parameters, we did
- // this above. For type parameters, no substitution is ever required.
- auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Param);
- if (TTP && PackedArgsBuilder.empty()) {
- // Set up a template instantiation context.
+ // itself, in case that substitution fails.
+ if (PackedArgsBuilder.empty()) {
LocalInstantiationScope Scope(S);
- Sema::InstantiatingTemplate Inst(S, Template->getLocation(), Template,
- TTP, Output,
- Template->getSourceRange());
- if (Inst.isInvalid())
- return true;
-
TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack, Output);
- if (!S.SubstDecl(TTP, S.CurContext,
- MultiLevelTemplateArgumentList(TemplateArgs)))
- return true;
+ MultiLevelTemplateArgumentList Args(TemplateArgs);
+
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Sema::InstantiatingTemplate Inst(S, Template->getLocation(), Template,
+ NTTP, Output,
+ Template->getSourceRange());
+ if (Inst.isInvalid() ||
+ S.SubstType(NTTP->getType(), Args, NTTP->getLocation(),
+ NTTP->getDeclName()).isNull())
+ return true;
+ } else if (auto *TTP = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ Sema::InstantiatingTemplate Inst(S, Template->getLocation(), Template,
+ TTP, Output,
+ Template->getSourceRange());
+ if (Inst.isInvalid() || !S.SubstDecl(TTP, S.CurContext, Args))
+ return true;
+ }
+ // For type parameters, no substitution is ever required.
}
// Create the resulting argument pack.
@@ -2159,44 +2186,169 @@ ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
return ConvertArg(Arg, 0);
}
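// --- Editor's illustration (hypothetical names; not part of the patch) ---
// Why an empty pack still needs substitution into the parameter itself:
//   template <class... Ts, typename Dependent<Ts...>::type... Vs>
//   void h();
// Deducing both packs empty still leaves Vs's type to be substituted;
// if that substitution fails, deduction must fail too, which is what the
// NTTP/TTP branches above now check even when PackedArgsBuilder is empty.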
-/// Complete template argument deduction for a class template partial
-/// specialization.
-static Sema::TemplateDeductionResult
-FinishTemplateArgumentDeduction(Sema &S,
- ClassTemplatePartialSpecializationDecl *Partial,
- const TemplateArgumentList &TemplateArgs,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- TemplateDeductionInfo &Info) {
- // Unevaluated SFINAE context.
- EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
- Sema::SFINAETrap Trap(S);
+// FIXME: This should not be a template, but
+// ClassTemplatePartialSpecializationDecl sadly does not derive from
+// TemplateDecl.
+template<typename TemplateDeclT>
+static Sema::TemplateDeductionResult ConvertDeducedTemplateArguments(
+ Sema &S, TemplateDeclT *Template, bool IsDeduced,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info, SmallVectorImpl<TemplateArgument> &Builder,
+ LocalInstantiationScope *CurrentInstantiationScope = nullptr,
+ unsigned NumAlreadyConverted = 0, bool PartialOverloading = false) {
+ TemplateParameterList *TemplateParams = Template->getTemplateParameters();
- Sema::ContextRAII SavedContext(S, Partial);
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ NamedDecl *Param = TemplateParams->getParam(I);
- // C++ [temp.deduct.type]p2:
- // [...] or if any template argument remains neither deduced nor
- // explicitly specified, template argument deduction fails.
- SmallVector<TemplateArgument, 4> Builder;
- TemplateParameterList *PartialParams = Partial->getTemplateParameters();
- for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
- NamedDecl *Param = PartialParams->getParam(I);
- if (Deduced[I].isNull()) {
- Info.Param = makeTemplateParameter(Param);
+ if (!Deduced[I].isNull()) {
+ if (I < NumAlreadyConverted) {
+ // We have already fully type-checked and converted this
+ // argument, because it was explicitly-specified. Just record the
+ // presence of this argument.
+ Builder.push_back(Deduced[I]);
+ // We may have had explicitly-specified template arguments for a
+ // template parameter pack (that may or may not have been extended
+ // via additional deduced arguments).
+ if (Param->isParameterPack() && CurrentInstantiationScope) {
+ if (CurrentInstantiationScope->getPartiallySubstitutedPack() ==
+ Param) {
+ // Forget the partially-substituted pack; its substitution is now
+ // complete.
+ CurrentInstantiationScope->ResetPartiallySubstitutedPack();
+ }
+ }
+ continue;
+ }
+
+ // We have deduced this argument, so it still needs to be
+ // checked and converted.
+ if (ConvertDeducedTemplateArgument(S, Param, Deduced[I], Template, Info,
+ IsDeduced, Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
+ return Sema::TDK_SubstitutionFailure;
+ }
+
+ continue;
+ }
+
+ // C++0x [temp.arg.explicit]p3:
+ // A trailing template parameter pack (14.5.3) not otherwise deduced will
+ // be deduced to an empty sequence of template arguments.
+ // FIXME: Where did the word "trailing" come from?
+ if (Param->isTemplateParameterPack()) {
+ // We may have had explicitly-specified template arguments for this
+ // template parameter pack. If so, our empty deduction extends the
+ // explicitly-specified set (C++0x [temp.arg.explicit]p9).
+ const TemplateArgument *ExplicitArgs;
+ unsigned NumExplicitArgs;
+ if (CurrentInstantiationScope &&
+ CurrentInstantiationScope->getPartiallySubstitutedPack(
+ &ExplicitArgs, &NumExplicitArgs) == Param) {
+ Builder.push_back(TemplateArgument(
+ llvm::makeArrayRef(ExplicitArgs, NumExplicitArgs)));
+
+ // Forget the partially-substituted pack; its substitution is now
+ // complete.
+ CurrentInstantiationScope->ResetPartiallySubstitutedPack();
+ } else {
+ // Go through the motions of checking the empty argument pack against
+ // the parameter pack.
+ DeducedTemplateArgument DeducedPack(TemplateArgument::getEmptyPack());
+ if (ConvertDeducedTemplateArgument(S, Param, DeducedPack, Template,
+ Info, IsDeduced, Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+ continue;
+ }
+
+ // Substitute into the default template argument, if available.
+ bool HasDefaultArg = false;
+ TemplateDecl *TD = dyn_cast<TemplateDecl>(Template);
+ if (!TD) {
+ assert(isa<ClassTemplatePartialSpecializationDecl>(Template));
return Sema::TDK_Incomplete;
}
- // We have deduced this argument, so it still needs to be
- // checked and converted.
- if (ConvertDeducedTemplateArgument(S, Param, Deduced[I],
- Partial, Info, false,
- Builder)) {
- Info.Param = makeTemplateParameter(Param);
+ TemplateArgumentLoc DefArg = S.SubstDefaultTemplateArgumentIfAvailable(
+ TD, TD->getLocation(), TD->getSourceRange().getEnd(), Param, Builder,
+ HasDefaultArg);
+
+ // If there was no default argument, deduction is incomplete.
+ if (DefArg.getArgument().isNull()) {
+ Info.Param = makeTemplateParameter(
+ const_cast<NamedDecl *>(TemplateParams->getParam(I)));
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
+ if (PartialOverloading) break;
+
+ return HasDefaultArg ? Sema::TDK_SubstitutionFailure
+ : Sema::TDK_Incomplete;
+ }
+
+ // Check whether we can actually use the default argument.
+ if (S.CheckTemplateArgument(Param, DefArg, TD, TD->getLocation(),
+ TD->getSourceRange().getEnd(), 0, Builder,
+ Sema::CTAK_Specified)) {
+ Info.Param = makeTemplateParameter(
+ const_cast<NamedDecl *>(TemplateParams->getParam(I)));
// FIXME: These template arguments are temporary. Free them!
Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
return Sema::TDK_SubstitutionFailure;
}
+
+ // If we get here, we successfully used the default template argument.
}
+ return Sema::TDK_Success;
+}
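// --- Editor's illustration (hypothetical names; not part of the patch) ---
// The two "not deduced" fallbacks in the loop above, in source form:
//   template <class T, class U = T> U f(T);
//   template <class... Ts> void g(Ts...);
// f(42): T = int is deduced; U is neither deduced nor specified, so its
// default argument (T, i.e. int) is substituted and checked.
// g():   Ts is not otherwise deduced, so it is deduced as the empty pack
// per C++ [temp.arg.explicit]p3.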
+
+static DeclContext *getAsDeclContextOrEnclosing(Decl *D) {
+ if (auto *DC = dyn_cast<DeclContext>(D))
+ return DC;
+ return D->getDeclContext();
+}
+
+template<typename T> struct IsPartialSpecialization {
+ static constexpr bool value = false;
+};
+template<>
+struct IsPartialSpecialization<ClassTemplatePartialSpecializationDecl> {
+ static constexpr bool value = true;
+};
+template<>
+struct IsPartialSpecialization<VarTemplatePartialSpecializationDecl> {
+ static constexpr bool value = true;
+};
+
+/// Complete template argument deduction for a partial specialization.
+template <typename T>
+static typename std::enable_if<IsPartialSpecialization<T>::value,
+ Sema::TemplateDeductionResult>::type
+FinishTemplateArgumentDeduction(
+ Sema &S, T *Partial, bool IsPartialOrdering,
+ const TemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info) {
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap Trap(S);
+
+ Sema::ContextRAII SavedContext(S, getAsDeclContextOrEnclosing(Partial));
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> Builder;
+ if (auto Result = ConvertDeducedTemplateArguments(
+ S, Partial, IsPartialOrdering, Deduced, Info, Builder))
+ return Result;
+
// Form the template argument list from the deduced template arguments.
TemplateArgumentList *DeducedArgumentList
= TemplateArgumentList::CreateCopy(S.Context, Builder);
@@ -2209,11 +2361,11 @@ FinishTemplateArgumentDeduction(Sema &S,
// and are equivalent to the template arguments originally provided
// to the class template.
LocalInstantiationScope InstScope(S);
- ClassTemplateDecl *ClassTemplate = Partial->getSpecializedTemplate();
- const ASTTemplateArgumentListInfo *PartialTemplArgInfo
- = Partial->getTemplateArgsAsWritten();
- const TemplateArgumentLoc *PartialTemplateArgs
- = PartialTemplArgInfo->getTemplateArgs();
+ auto *Template = Partial->getSpecializedTemplate();
+ const ASTTemplateArgumentListInfo *PartialTemplArgInfo =
+ Partial->getTemplateArgsAsWritten();
+ const TemplateArgumentLoc *PartialTemplateArgs =
+ PartialTemplArgInfo->getTemplateArgs();
TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc,
PartialTemplArgInfo->RAngleLoc);
@@ -2224,21 +2376,19 @@ FinishTemplateArgumentDeduction(Sema &S,
if (ParamIdx >= Partial->getTemplateParameters()->size())
ParamIdx = Partial->getTemplateParameters()->size() - 1;
- Decl *Param
- = const_cast<NamedDecl *>(
- Partial->getTemplateParameters()->getParam(ParamIdx));
+ Decl *Param = const_cast<NamedDecl *>(
+ Partial->getTemplateParameters()->getParam(ParamIdx));
Info.Param = makeTemplateParameter(Param);
Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
return Sema::TDK_SubstitutionFailure;
}
SmallVector<TemplateArgument, 4> ConvertedInstArgs;
- if (S.CheckTemplateArgumentList(ClassTemplate, Partial->getLocation(),
- InstArgs, false, ConvertedInstArgs))
+ if (S.CheckTemplateArgumentList(Template, Partial->getLocation(), InstArgs,
+ false, ConvertedInstArgs))
return Sema::TDK_SubstitutionFailure;
- TemplateParameterList *TemplateParams
- = ClassTemplate->getTemplateParameters();
+ TemplateParameterList *TemplateParams = Template->getTemplateParameters();
for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
TemplateArgument InstArg = ConvertedInstArgs.data()[I];
if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
@@ -2255,6 +2405,48 @@ FinishTemplateArgumentDeduction(Sema &S,
return Sema::TDK_Success;
}
+/// Complete template argument deduction for a class or variable template,
+/// when partial ordering against a partial specialization.
+// FIXME: Factor out duplication with partial specialization version above.
+static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
+ Sema &S, TemplateDecl *Template, bool PartialOrdering,
+ const TemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info) {
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap Trap(S);
+
+ Sema::ContextRAII SavedContext(S, getAsDeclContextOrEnclosing(Template));
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> Builder;
+ if (auto Result = ConvertDeducedTemplateArguments(
+ S, Template, /*IsDeduced*/PartialOrdering, Deduced, Info, Builder))
+ return Result;
+
+ // Check that we produced the correct argument list.
+ TemplateParameterList *TemplateParams = Template->getTemplateParameters();
+ for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
+ TemplateArgument InstArg = Builder[I];
+ if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg,
+ /*PackExpansionMatchesPack*/true)) {
+ Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
+ Info.FirstArg = TemplateArgs[I];
+ Info.SecondArg = InstArg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ }
+
+ if (Trap.hasErrorOccurred())
+ return Sema::TDK_SubstitutionFailure;
+
+ return Sema::TDK_Success;
+}
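// --- Editor's illustration (hypothetical names; not part of the patch) ---
// Where the TemplateDecl overload above comes in: partial ordering of a
// class or variable template against one of its partial specializations:
//   template <class... Ts> struct X {};     // primary, arguments {Ts...}
//   template <class T>     struct X<T> {};  // partial specialization
// Ordering deduces the primary's {Ts...} from the specialization's {T};
// the final loop then compares the deduced (flattened) arguments against
// the originals, hence PackExpansionMatchesPack = true above.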
+
/// \brief Perform template argument deduction to determine whether
/// the given template arguments match the given class template
/// partial specialization per C++ [temp.class.spec.match].
@@ -2293,112 +2485,13 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
- Deduced, Info);
-}
-
-/// Complete template argument deduction for a variable template partial
-/// specialization.
-/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
-/// May require unifying ClassTemplate(Partial)SpecializationDecl and
-/// VarTemplate(Partial)SpecializationDecl with a new data
-/// structure Template(Partial)SpecializationDecl, and
-/// using Template(Partial)SpecializationDecl as input type.
-static Sema::TemplateDeductionResult FinishTemplateArgumentDeduction(
- Sema &S, VarTemplatePartialSpecializationDecl *Partial,
- const TemplateArgumentList &TemplateArgs,
- SmallVectorImpl<DeducedTemplateArgument> &Deduced,
- TemplateDeductionInfo &Info) {
- // Unevaluated SFINAE context.
- EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
- Sema::SFINAETrap Trap(S);
-
- // C++ [temp.deduct.type]p2:
- // [...] or if any template argument remains neither deduced nor
- // explicitly specified, template argument deduction fails.
- SmallVector<TemplateArgument, 4> Builder;
- TemplateParameterList *PartialParams = Partial->getTemplateParameters();
- for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
- NamedDecl *Param = PartialParams->getParam(I);
- if (Deduced[I].isNull()) {
- Info.Param = makeTemplateParameter(Param);
- return Sema::TDK_Incomplete;
- }
-
- // We have deduced this argument, so it still needs to be
- // checked and converted.
- if (ConvertDeducedTemplateArgument(S, Param, Deduced[I], Partial,
- Info, false, Builder)) {
- Info.Param = makeTemplateParameter(Param);
- // FIXME: These template arguments are temporary. Free them!
- Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder));
- return Sema::TDK_SubstitutionFailure;
- }
- }
-
- // Form the template argument list from the deduced template arguments.
- TemplateArgumentList *DeducedArgumentList = TemplateArgumentList::CreateCopy(
- S.Context, Builder);
-
- Info.reset(DeducedArgumentList);
-
- // Substitute the deduced template arguments into the template
- // arguments of the class template partial specialization, and
- // verify that the instantiated template arguments are both valid
- // and are equivalent to the template arguments originally provided
- // to the class template.
- LocalInstantiationScope InstScope(S);
- VarTemplateDecl *VarTemplate = Partial->getSpecializedTemplate();
- const ASTTemplateArgumentListInfo *PartialTemplArgInfo
- = Partial->getTemplateArgsAsWritten();
- const TemplateArgumentLoc *PartialTemplateArgs
- = PartialTemplArgInfo->getTemplateArgs();
-
- TemplateArgumentListInfo InstArgs(PartialTemplArgInfo->LAngleLoc,
- PartialTemplArgInfo->RAngleLoc);
-
- if (S.Subst(PartialTemplateArgs, PartialTemplArgInfo->NumTemplateArgs,
- InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
- unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
- if (ParamIdx >= Partial->getTemplateParameters()->size())
- ParamIdx = Partial->getTemplateParameters()->size() - 1;
-
- Decl *Param = const_cast<NamedDecl *>(
- Partial->getTemplateParameters()->getParam(ParamIdx));
- Info.Param = makeTemplateParameter(Param);
- Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
- return Sema::TDK_SubstitutionFailure;
- }
- SmallVector<TemplateArgument, 4> ConvertedInstArgs;
- if (S.CheckTemplateArgumentList(VarTemplate, Partial->getLocation(), InstArgs,
- false, ConvertedInstArgs))
- return Sema::TDK_SubstitutionFailure;
-
- TemplateParameterList *TemplateParams = VarTemplate->getTemplateParameters();
- for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
- TemplateArgument InstArg = ConvertedInstArgs.data()[I];
- if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
- Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
- Info.FirstArg = TemplateArgs[I];
- Info.SecondArg = InstArg;
- return Sema::TDK_NonDeducedMismatch;
- }
- }
-
- if (Trap.hasErrorOccurred())
- return Sema::TDK_SubstitutionFailure;
-
- return Sema::TDK_Success;
+ return ::FinishTemplateArgumentDeduction(
+ *this, Partial, /*PartialOrdering=*/false, TemplateArgs, Deduced, Info);
}
/// \brief Perform template argument deduction to determine whether
/// the given template arguments match the given variable template
/// partial specialization per C++ [temp.class.spec.match].
-/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
-/// May require unifying ClassTemplate(Partial)SpecializationDecl and
-/// VarTemplate(Partial)SpecializationDecl with a new data
-/// structure Template(Partial)SpecializationDecl, and
-/// using Template(Partial)SpecializationDecl as input type.
Sema::TemplateDeductionResult
Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
@@ -2432,8 +2525,8 @@ Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
if (Trap.hasErrorOccurred())
return Sema::TDK_SubstitutionFailure;
- return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
- Deduced, Info);
+ return ::FinishTemplateArgumentDeduction(
+ *this, Partial, /*PartialOrdering=*/false, TemplateArgs, Deduced, Info);
}
/// \brief Determine whether the given type T is a simple-template-id type.
@@ -2573,15 +2666,15 @@ Sema::SubstituteExplicitTemplateArguments(
ParamTypes, /*params*/ nullptr, ExtParamInfos))
return TDK_SubstitutionFailure;
}
-
+
// Instantiate the return type.
QualType ResultType;
{
// C++11 [expr.prim.general]p3:
- // If a declaration declares a member function or member function
- // template of a class X, the expression this is a prvalue of type
+ // If a declaration declares a member function or member function
+ // template of a class X, the expression this is a prvalue of type
// "pointer to cv-qualifier-seq X" between the optional cv-qualifer-seq
- // and the end of the function-definition, member-declarator, or
+ // and the end of the function-definition, member-declarator, or
// declarator.
unsigned ThisTypeQuals = 0;
CXXRecordDecl *ThisContext = nullptr;
@@ -2589,7 +2682,7 @@ Sema::SubstituteExplicitTemplateArguments(
ThisContext = Method->getParent();
ThisTypeQuals = Method->getTypeQualifiers();
}
-
+
CXXThisScopeRAII ThisScope(*this, ThisContext, ThisTypeQuals,
getLangOpts().CPlusPlus11);
@@ -2645,35 +2738,42 @@ Sema::SubstituteExplicitTemplateArguments(
/// \brief Check whether the deduced argument type for a call to a function
/// template matches the actual argument type per C++ [temp.deduct.call]p4.
-static bool
-CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
+static bool
+CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
QualType DeducedA) {
ASTContext &Context = S.Context;
-
+
QualType A = OriginalArg.OriginalArgType;
QualType OriginalParamType = OriginalArg.OriginalParamType;
-
+
// Check for type equality (top-level cv-qualifiers are ignored).
if (Context.hasSameUnqualifiedType(A, DeducedA))
return false;
-
+
// Strip off references on the argument types; they aren't needed for
// the following checks.
if (const ReferenceType *DeducedARef = DeducedA->getAs<ReferenceType>())
DeducedA = DeducedARef->getPointeeType();
if (const ReferenceType *ARef = A->getAs<ReferenceType>())
A = ARef->getPointeeType();
-
+
// C++ [temp.deduct.call]p4:
// [...] However, there are three cases that allow a difference:
- // - If the original P is a reference type, the deduced A (i.e., the
- // type referred to by the reference) can be more cv-qualified than
+ // - If the original P is a reference type, the deduced A (i.e., the
+ // type referred to by the reference) can be more cv-qualified than
// the transformed A.
if (const ReferenceType *OriginalParamRef
= OriginalParamType->getAs<ReferenceType>()) {
// We don't want to keep the reference around any more.
OriginalParamType = OriginalParamRef->getPointeeType();
-
+
+ // FIXME: Resolve core issue (no number yet): if the original P is a
+ // reference type and the transformed A is function type "noexcept F",
+ // the deduced A can be F.
+ QualType Tmp;
+ if (A->isFunctionType() && S.IsFunctionConversion(A, DeducedA, Tmp))
+ return false;
+
Qualifiers AQuals = A.getQualifiers();
Qualifiers DeducedAQuals = DeducedA.getQualifiers();
@@ -2693,34 +2793,32 @@ CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
// Qualifiers match; there's nothing to do.
} else if (!DeducedAQuals.compatiblyIncludes(AQuals)) {
return true;
- } else {
+ } else {
// Qualifiers are compatible, so have the argument type adopt the
// deduced argument type's qualifiers as if we had performed the
// qualification conversion.
A = Context.getQualifiedType(A.getUnqualifiedType(), DeducedAQuals);
}
}
-
- // - The transformed A can be another pointer or pointer to member
- // type that can be converted to the deduced A via a qualification
- // conversion.
+
+ // - The transformed A can be another pointer or pointer to member
+ // type that can be converted to the deduced A via a function pointer
+ // conversion and/or a qualification conversion.
//
- // Also allow conversions which merely strip [[noreturn]] from function types
- // (recursively) as an extension.
- // FIXME: Currently, this doesn't play nicely with qualification conversions.
+ // Also allow conversions which merely strip __attribute__((noreturn)) from
+ // function types (recursively).
bool ObjCLifetimeConversion = false;
QualType ResultTy;
if ((A->isAnyPointerType() || A->isMemberPointerType()) &&
(S.IsQualificationConversion(A, DeducedA, false,
ObjCLifetimeConversion) ||
- S.IsNoReturnConversion(A, DeducedA, ResultTy)))
+ S.IsFunctionConversion(A, DeducedA, ResultTy)))
return false;
-
-
- // - If P is a class and P has the form simple-template-id, then the
+
+ // - If P is a class and P has the form simple-template-id, then the
// transformed A can be a derived class of the deduced A. [...]
- // [...] Likewise, if P is a pointer to a class of the form
- // simple-template-id, the transformed A can be a pointer to a
+ // [...] Likewise, if P is a pointer to a class of the form
+ // simple-template-id, the transformed A can be a pointer to a
// derived class pointed to by the deduced A.
if (const PointerType *OriginalParamPtr
= OriginalParamType->getAs<PointerType>()) {
@@ -2734,14 +2832,14 @@ CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
}
}
}
-
+
if (Context.hasSameUnqualifiedType(A, DeducedA))
return false;
-
+
if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
S.IsDerivedFrom(SourceLocation(), A, DeducedA))
return false;
-
+
return true;
}
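// --- Editor's illustration (hypothetical names; not part of the patch) ---
// The [temp.deduct.call]p4 allowances checked above, in source form:
//   template <class T> void byRef(const T &);
//   template <class T> struct Box {};
//   template <class T> void byPtr(Box<T> *);
//   struct IntBox : Box<int> {};
// byRef with an 'int' lvalue: deduced A 'const int' differs from A 'int'
// only by cv-qualifiers on a reference parameter -- allowed.
// byPtr(&intBox): deduced A 'Box<int> *' vs A 'IntBox *' -- a pointer to
// a derived class of the deduced pointee -- allowed. The new
// IsFunctionConversion calls extend this to stripping noexcept/noreturn
// from function types.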
@@ -2759,9 +2857,6 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs,
bool PartialOverloading) {
- TemplateParameterList *TemplateParams
- = FunctionTemplate->getTemplateParameters();
-
// Unevaluated SFINAE context.
EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
SFINAETrap Trap(*this);
@@ -2782,114 +2877,11 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
// [...] or if any template argument remains neither deduced nor
// explicitly specified, template argument deduction fails.
SmallVector<TemplateArgument, 4> Builder;
- for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
- NamedDecl *Param = TemplateParams->getParam(I);
-
- if (!Deduced[I].isNull()) {
- if (I < NumExplicitlySpecified) {
- // We have already fully type-checked and converted this
- // argument, because it was explicitly-specified. Just record the
- // presence of this argument.
- Builder.push_back(Deduced[I]);
- // We may have had explicitly-specified template arguments for a
- // template parameter pack (that may or may not have been extended
- // via additional deduced arguments).
- if (Param->isParameterPack() && CurrentInstantiationScope) {
- if (CurrentInstantiationScope->getPartiallySubstitutedPack() ==
- Param) {
- // Forget the partially-substituted pack; its substitution is now
- // complete.
- CurrentInstantiationScope->ResetPartiallySubstitutedPack();
- }
- }
- continue;
- }
-
- // We have deduced this argument, so it still needs to be
- // checked and converted.
- if (ConvertDeducedTemplateArgument(*this, Param, Deduced[I],
- FunctionTemplate, Info,
- true, Builder)) {
- Info.Param = makeTemplateParameter(Param);
- // FIXME: These template arguments are temporary. Free them!
- Info.reset(TemplateArgumentList::CreateCopy(Context, Builder));
- return TDK_SubstitutionFailure;
- }
-
- continue;
- }
-
- // C++0x [temp.arg.explicit]p3:
- // A trailing template parameter pack (14.5.3) not otherwise deduced will
- // be deduced to an empty sequence of template arguments.
- // FIXME: Where did the word "trailing" come from?
- if (Param->isTemplateParameterPack()) {
- // We may have had explicitly-specified template arguments for this
- // template parameter pack. If so, our empty deduction extends the
- // explicitly-specified set (C++0x [temp.arg.explicit]p9).
- const TemplateArgument *ExplicitArgs;
- unsigned NumExplicitArgs;
- if (CurrentInstantiationScope &&
- CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs,
- &NumExplicitArgs)
- == Param) {
- Builder.push_back(TemplateArgument(
- llvm::makeArrayRef(ExplicitArgs, NumExplicitArgs)));
-
- // Forget the partially-substituted pack; its substitution is now
- // complete.
- CurrentInstantiationScope->ResetPartiallySubstitutedPack();
- } else {
- // Go through the motions of checking the empty argument pack against
- // the parameter pack.
- DeducedTemplateArgument DeducedPack(TemplateArgument::getEmptyPack());
- if (ConvertDeducedTemplateArgument(*this, Param, DeducedPack,
- FunctionTemplate, Info, true,
- Builder)) {
- Info.Param = makeTemplateParameter(Param);
- // FIXME: These template arguments are temporary. Free them!
- Info.reset(TemplateArgumentList::CreateCopy(Context, Builder));
- return TDK_SubstitutionFailure;
- }
- }
- continue;
- }
-
- // Substitute into the default template argument, if available.
- bool HasDefaultArg = false;
- TemplateArgumentLoc DefArg
- = SubstDefaultTemplateArgumentIfAvailable(FunctionTemplate,
- FunctionTemplate->getLocation(),
- FunctionTemplate->getSourceRange().getEnd(),
- Param,
- Builder, HasDefaultArg);
-
- // If there was no default argument, deduction is incomplete.
- if (DefArg.getArgument().isNull()) {
- Info.Param = makeTemplateParameter(
- const_cast<NamedDecl *>(TemplateParams->getParam(I)));
- Info.reset(TemplateArgumentList::CreateCopy(Context, Builder));
- if (PartialOverloading) break;
-
- return HasDefaultArg ? TDK_SubstitutionFailure : TDK_Incomplete;
- }
-
- // Check whether we can actually use the default argument.
- if (CheckTemplateArgument(Param, DefArg,
- FunctionTemplate,
- FunctionTemplate->getLocation(),
- FunctionTemplate->getSourceRange().getEnd(),
- 0, Builder,
- CTAK_Specified)) {
- Info.Param = makeTemplateParameter(
- const_cast<NamedDecl *>(TemplateParams->getParam(I)));
- // FIXME: These template arguments are temporary. Free them!
- Info.reset(TemplateArgumentList::CreateCopy(Context, Builder));
- return TDK_SubstitutionFailure;
- }
-
- // If we get here, we successfully used the default template argument.
- }
+ if (auto Result = ConvertDeducedTemplateArguments(
+ *this, FunctionTemplate, /*IsDeduced*/true, Deduced, Info, Builder,
+ CurrentInstantiationScope, NumExplicitlySpecified,
+ PartialOverloading))
+ return Result;
// Form the template argument list from the deduced template arguments.
TemplateArgumentList *DeducedArgumentList
@@ -2927,15 +2919,15 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
if (OriginalCallArgs) {
// C++ [temp.deduct.call]p4:
// In general, the deduction process attempts to find template argument
- // values that will make the deduced A identical to A (after the type A
+ // values that will make the deduced A identical to A (after the type A
// is transformed as described above). [...]
for (unsigned I = 0, N = OriginalCallArgs->size(); I != N; ++I) {
OriginalCallArg OriginalArg = (*OriginalCallArgs)[I];
unsigned ParamIdx = OriginalArg.ArgIdx;
-
+
if (ParamIdx >= Specialization->getNumParams())
continue;
-
+
QualType DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA)) {
Info.FirstArg = TemplateArgument(DeducedA);
@@ -2945,7 +2937,7 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
}
}
}
-
+
// If we suppressed any diagnostics while performing template argument
// deduction, and if we haven't already instantiated this declaration,
// keep track of these diagnostics. They'll be emitted if this specialization
@@ -3025,7 +3017,7 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
return QualType();
}
-
+
// Gather the explicit template arguments, if any.
TemplateArgumentListInfo ExplicitTemplateArgs;
if (Ovl->hasExplicitTemplateArgs())
@@ -3041,14 +3033,14 @@ ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
// non-deduced context.
if (!Ovl->hasExplicitTemplateArgs())
return QualType();
-
- // Otherwise, see if we can resolve a function type
+
+ // Otherwise, see if we can resolve a function type
FunctionDecl *Specialization = nullptr;
TemplateDeductionInfo Info(Ovl->getNameLoc());
if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs,
Specialization, Info))
continue;
-
+
D = Specialization;
}
@@ -3250,16 +3242,13 @@ DeduceFromInitializerList(Sema &S, TemplateParameterList *TemplateParams,
S.Context.getAsDependentSizedArrayType(AdjustedParamType);
// Determine whether the array bound is something we can deduce.
if (NonTypeTemplateParmDecl *NTTP =
- getDeducedParameterFromExpr(ArrTy->getSizeExpr())) {
+ getDeducedParameterFromExpr(Info, ArrTy->getSizeExpr())) {
// We can perform template argument deduction for the given non-type
// template parameter.
- assert(NTTP->getDepth() == 0 &&
- "Cannot deduce non-type template argument at depth > 0");
llvm::APInt Size(S.Context.getIntWidth(NTTP->getType()),
ILE->getNumInits());
-
Result = DeduceNonTypeTemplateArgument(
- S, NTTP, llvm::APSInt(Size), NTTP->getType(),
+ S, TemplateParams, NTTP, llvm::APSInt(Size), NTTP->getType(),
/*ArrayBound=*/true, Info, Deduced);
}
}
@@ -3291,7 +3280,7 @@ DeduceTemplateArgumentByListElement(Sema &S,
// For all other cases, just match by type.
QualType ArgType = Arg->getType();
- if (AdjustFunctionParmAndArgTypesForDeduction(S, TemplateParams, ParamType,
+ if (AdjustFunctionParmAndArgTypesForDeduction(S, TemplateParams, ParamType,
ArgType, Arg, TDF)) {
Info.Expression = Arg;
return Sema::TDK_FailedOverloadResolution;
@@ -3382,7 +3371,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
ParamIdx != NumParamTypes; ++ParamIdx) {
QualType OrigParamType = ParamTypes[ParamIdx];
QualType ParamType = OrigParamType;
-
+
const PackExpansionType *ParamExpansion
= dyn_cast<PackExpansionType>(ParamType);
if (!ParamExpansion) {
@@ -3392,7 +3381,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
Expr *Arg = Args[ArgIdx++];
QualType ArgType = Arg->getType();
-
+
unsigned TDF = 0;
if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
ParamType, ArgType, Arg,
@@ -3419,7 +3408,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// Keep track of the argument type and corresponding parameter index,
// so we can check for compatibility between the deduced A and A.
- OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
ArgType));
if (TemplateDeductionResult Result
@@ -3482,7 +3471,7 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
// Keep track of the argument type and corresponding argument index,
// so we can check for compatibility between the deduced A and A.
if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
- OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
ArgType));
if (TemplateDeductionResult Result
@@ -3511,25 +3500,42 @@ Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
}
QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
- QualType FunctionType) {
+ QualType FunctionType,
+ bool AdjustExceptionSpec) {
if (ArgFunctionType.isNull())
return ArgFunctionType;
const FunctionProtoType *FunctionTypeP =
FunctionType->castAs<FunctionProtoType>();
- CallingConv CC = FunctionTypeP->getCallConv();
- bool NoReturn = FunctionTypeP->getNoReturnAttr();
const FunctionProtoType *ArgFunctionTypeP =
ArgFunctionType->getAs<FunctionProtoType>();
- if (ArgFunctionTypeP->getCallConv() == CC &&
- ArgFunctionTypeP->getNoReturnAttr() == NoReturn)
+
+ FunctionProtoType::ExtProtoInfo EPI = ArgFunctionTypeP->getExtProtoInfo();
+ bool Rebuild = false;
+
+ CallingConv CC = FunctionTypeP->getCallConv();
+ if (EPI.ExtInfo.getCC() != CC) {
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC);
+ Rebuild = true;
+ }
+
+ bool NoReturn = FunctionTypeP->getNoReturnAttr();
+ if (EPI.ExtInfo.getNoReturn() != NoReturn) {
+ EPI.ExtInfo = EPI.ExtInfo.withNoReturn(NoReturn);
+ Rebuild = true;
+ }
+
+ if (AdjustExceptionSpec && (FunctionTypeP->hasExceptionSpec() ||
+ ArgFunctionTypeP->hasExceptionSpec())) {
+ EPI.ExceptionSpec = FunctionTypeP->getExtProtoInfo().ExceptionSpec;
+ Rebuild = true;
+ }
+
+ if (!Rebuild)
return ArgFunctionType;
- FunctionType::ExtInfo EI = ArgFunctionTypeP->getExtInfo().withCallingConv(CC);
- EI = EI.withNoReturn(NoReturn);
- ArgFunctionTypeP =
- cast<FunctionProtoType>(Context.adjustFunctionType(ArgFunctionTypeP, EI));
- return QualType(ArgFunctionTypeP, 0);
+ return Context.getFunctionType(ArgFunctionTypeP->getReturnType(),
+ ArgFunctionTypeP->getParamTypes(), EPI);
}
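
// A rough illustration of the adjustment above (declarations invented):
//   template<typename T> void f(T) noexcept;
//   template<> void f(int) noexcept;   // found by signature lookup
// The written signature is rebuilt with the template's exception spec (and
// calling convention / noreturn) so the comparison focuses on the parameters.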
/// \brief Deduce template arguments when taking the address of a function
@@ -3554,14 +3560,17 @@ QualType Sema::adjustCCAndNoReturn(QualType ArgFunctionType,
/// \param Info the argument will be updated to provide additional information
/// about template argument deduction.
///
+/// \param IsAddressOfFunction If \c true, we are deducing as part of taking
+/// the address of a function template per [temp.deduct.funcaddr] and
+/// [over.over]. If \c false, we are looking up a function template
+/// specialization based on its signature, per [temp.deduct.decl].
+///
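+/// A short sketch of the two modes (declarations invented for illustration):
+///   template<typename T> void g(T);
+///   void (*p)(int) = g;       // IsAddressOfFunction: [over.over]
+///   template<> void g(int);   // signature lookup: [temp.deduct.decl]
+///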
/// \returns the result of template argument deduction.
-Sema::TemplateDeductionResult
-Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- QualType ArgFunctionType,
- FunctionDecl *&Specialization,
- TemplateDeductionInfo &Info,
- bool InOverloadResolution) {
+Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
+ FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType,
+ FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
+ bool IsAddressOfFunction) {
if (FunctionTemplate->isInvalidDecl())
return TDK_Invalid;
@@ -3569,8 +3578,13 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
QualType FunctionType = Function->getType();
- if (!InOverloadResolution)
- ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, FunctionType);
+
+ // When taking the address of a function, we require convertibility of
+ // the resulting function type. Otherwise, we allow arbitrary mismatches
+ // of calling convention, noreturn, and noexcept.
+ if (!IsAddressOfFunction)
+ ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, FunctionType,
+ /*AdjustExceptionSpec*/true);
// Substitute any explicit template arguments.
LocalInstantiationScope InstScope(*this);
@@ -3595,9 +3609,11 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
Deduced.resize(TemplateParams->size());
// If the function has a deduced return type, substitute it for a dependent
- // type so that we treat it as a non-deduced context in what follows.
+ // type so that we treat it as a non-deduced context in what follows. If we
+ // are looking up by signature, the signature type should also have a deduced
+ // return type, which we instead expect to exactly match.
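+  // e.g. (illustrative): an explicit specialization declared as
+  // 'template<> auto f(int);' must repeat 'auto' rather than a concrete
+  // return type; the signatures are expected to match exactly here.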
bool HasDeducedReturnType = false;
- if (getLangOpts().CPlusPlus14 && InOverloadResolution &&
+ if (getLangOpts().CPlusPlus14 && IsAddressOfFunction &&
Function->getReturnType()->getContainedAutoType()) {
FunctionType = SubstAutoType(FunctionType, Context.DependentTy);
HasDeducedReturnType = true;
@@ -3605,7 +3621,8 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
if (!ArgFunctionType.isNull()) {
unsigned TDF = TDF_TopLevelParameterTypeList;
- if (InOverloadResolution) TDF |= TDF_InOverloadResolution;
+ if (IsAddressOfFunction)
+ TDF |= TDF_InOverloadResolution;
// Deduce template arguments from the function type.
if (TemplateDeductionResult Result
= DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
@@ -3627,86 +3644,106 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
DeduceReturnType(Specialization, Info.getLocation(), false))
return TDK_MiscellaneousDeductionFailure;
+ // If the function has a dependent exception specification, resolve it now,
+ // so we can check that the exception specification matches.
+ auto *SpecializationFPT =
+ Specialization->getType()->castAs<FunctionProtoType>();
+ if (getLangOpts().CPlusPlus1z &&
+ isUnresolvedExceptionSpec(SpecializationFPT->getExceptionSpecType()) &&
+ !ResolveExceptionSpec(Info.getLocation(), SpecializationFPT))
+ return TDK_MiscellaneousDeductionFailure;
+
+ // Adjust the exception specification of the argument again to match the
+ // substituted and resolved type we just formed. (Calling convention and
+ // noreturn can't be dependent, so we don't actually need this for them
+ // right now.)
+ QualType SpecializationType = Specialization->getType();
+ if (!IsAddressOfFunction)
+ ArgFunctionType = adjustCCAndNoReturn(ArgFunctionType, SpecializationType,
+ /*AdjustExceptionSpec*/true);
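+  // e.g. (illustrative): given 'template<typename T> void f() noexcept(T::value);',
+  // the noexcept value is known only after substitution, hence this second
+  // adjustment against the resolved specialization type.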
+
// If the requested function type does not match the actual type of the
// specialization with respect to arguments of compatible pointer to function
// types, template argument deduction fails.
if (!ArgFunctionType.isNull()) {
- if (InOverloadResolution && !isSameOrCompatibleFunctionType(
- Context.getCanonicalType(Specialization->getType()),
- Context.getCanonicalType(ArgFunctionType)))
+ if (IsAddressOfFunction &&
+ !isSameOrCompatibleFunctionType(
+ Context.getCanonicalType(SpecializationType),
+ Context.getCanonicalType(ArgFunctionType)))
return TDK_MiscellaneousDeductionFailure;
- else if(!InOverloadResolution &&
- !Context.hasSameType(Specialization->getType(), ArgFunctionType))
+
+ if (!IsAddressOfFunction &&
+ !Context.hasSameType(SpecializationType, ArgFunctionType))
return TDK_MiscellaneousDeductionFailure;
}
return TDK_Success;
}
-/// \brief Given a function declaration (e.g. a generic lambda conversion
-/// function) that contains an 'auto' in its result type, substitute it
+/// \brief Given a function declaration (e.g. a generic lambda conversion
+/// function) that contains an 'auto' in its result type, substitute it
/// with TypeToReplaceAutoWith. Be careful to pass in the type you want
/// to replace 'auto' with and not the actual result type you want
/// to set the function to.
-static inline void
-SubstAutoWithinFunctionReturnType(FunctionDecl *F,
+static inline void
+SubstAutoWithinFunctionReturnType(FunctionDecl *F,
QualType TypeToReplaceAutoWith, Sema &S) {
assert(!TypeToReplaceAutoWith->getContainedAutoType());
QualType AutoResultType = F->getReturnType();
- assert(AutoResultType->getContainedAutoType());
- QualType DeducedResultType = S.SubstAutoType(AutoResultType,
+ assert(AutoResultType->getContainedAutoType());
+ QualType DeducedResultType = S.SubstAutoType(AutoResultType,
TypeToReplaceAutoWith);
S.Context.adjustDeducedFunctionResultType(F, DeducedResultType);
}
-/// \brief Given a specialized conversion operator of a generic lambda
-/// create the corresponding specializations of the call operator and
-/// the static-invoker. If the return type of the call operator is auto,
-/// deduce its return type and check if that matches the
+/// \brief Given a specialized conversion operator of a generic lambda
+/// create the corresponding specializations of the call operator and
+/// the static-invoker. If the return type of the call operator is auto,
+/// deduce its return type and check if that matches the
/// return type of the destination function ptr.
-static inline Sema::TemplateDeductionResult
+static inline Sema::TemplateDeductionResult
SpecializeCorrespondingLambdaCallOperatorAndInvoker(
CXXConversionDecl *ConversionSpecialized,
SmallVectorImpl<DeducedTemplateArgument> &DeducedArguments,
QualType ReturnTypeOfDestFunctionPtr,
TemplateDeductionInfo &TDInfo,
Sema &S) {
-
+
CXXRecordDecl *LambdaClass = ConversionSpecialized->getParent();
- assert(LambdaClass && LambdaClass->isGenericLambda());
-
+ assert(LambdaClass && LambdaClass->isGenericLambda());
+
CXXMethodDecl *CallOpGeneric = LambdaClass->getLambdaCallOperator();
QualType CallOpResultType = CallOpGeneric->getReturnType();
- const bool GenericLambdaCallOperatorHasDeducedReturnType =
+ const bool GenericLambdaCallOperatorHasDeducedReturnType =
CallOpResultType->getContainedAutoType();
-
- FunctionTemplateDecl *CallOpTemplate =
+
+ FunctionTemplateDecl *CallOpTemplate =
CallOpGeneric->getDescribedFunctionTemplate();
FunctionDecl *CallOpSpecialized = nullptr;
- // Use the deduced arguments of the conversion function, to specialize our
+  // Use the deduced arguments of the conversion function to specialize our
// generic lambda's call operator.
if (Sema::TemplateDeductionResult Result
- = S.FinishTemplateArgumentDeduction(CallOpTemplate,
- DeducedArguments,
+ = S.FinishTemplateArgumentDeduction(CallOpTemplate,
+ DeducedArguments,
0, CallOpSpecialized, TDInfo))
return Result;
-
+
// If we need to deduce the return type, do so (instantiates the callop).
if (GenericLambdaCallOperatorHasDeducedReturnType &&
CallOpSpecialized->getReturnType()->isUndeducedType())
- S.DeduceReturnType(CallOpSpecialized,
+ S.DeduceReturnType(CallOpSpecialized,
CallOpSpecialized->getPointOfInstantiation(),
/*Diagnose*/ true);
-
+
// Check to see if the return type of the destination ptr-to-function
// matches the return type of the call operator.
if (!S.Context.hasSameType(CallOpSpecialized->getReturnType(),
ReturnTypeOfDestFunctionPtr))
return Sema::TDK_NonDeducedMismatch;
// Since we have succeeded in matching the source and destination
- // ptr-to-functions (now including return type), and have successfully
+ // ptr-to-functions (now including return type), and have successfully
// specialized our corresponding call operator, we are ready to
// specialize the static invoker with the deduced arguments of our
// ptr-to-function.
@@ -3717,16 +3754,16 @@ SpecializeCorrespondingLambdaCallOperatorAndInvoker(
#ifndef NDEBUG
Sema::TemplateDeductionResult LLVM_ATTRIBUTE_UNUSED Result =
#endif
- S.FinishTemplateArgumentDeduction(InvokerTemplate, DeducedArguments, 0,
+ S.FinishTemplateArgumentDeduction(InvokerTemplate, DeducedArguments, 0,
InvokerSpecialized, TDInfo);
- assert(Result == Sema::TDK_Success &&
+ assert(Result == Sema::TDK_Success &&
"If the call operator succeeded so should the invoker!");
// Set the result type to match the corresponding call operator
// specialization's result type.
if (GenericLambdaCallOperatorHasDeducedReturnType &&
InvokerSpecialized->getReturnType()->isUndeducedType()) {
// Be sure to get the type to replace 'auto' with and not
- // the full result type of the call op specialization
+ // the full result type of the call op specialization
// to substitute into the 'auto' of the invoker and conversion
// function.
// For e.g.
@@ -3738,14 +3775,14 @@ SpecializeCorrespondingLambdaCallOperatorAndInvoker(
->getDeducedType();
SubstAutoWithinFunctionReturnType(InvokerSpecialized,
TypeToReplaceAutoWith, S);
- SubstAutoWithinFunctionReturnType(ConversionSpecialized,
+ SubstAutoWithinFunctionReturnType(ConversionSpecialized,
TypeToReplaceAutoWith, S);
}
-
+
// Ensure that static invoker doesn't have a const qualifier.
- // FIXME: When creating the InvokerTemplate in SemaLambda.cpp
+ // FIXME: When creating the InvokerTemplate in SemaLambda.cpp
// do not use the CallOperator's TypeSourceInfo which allows
- // the const qualifier to leak through.
+ // the const qualifier to leak through.
const FunctionProtoType *InvokerFPT = InvokerSpecialized->
getType().getTypePtr()->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = InvokerFPT->getExtProtoInfo();
@@ -3857,7 +3894,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
// Finish template argument deduction.
FunctionDecl *ConversionSpecialized = nullptr;
TemplateDeductionResult Result
- = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
+ = FinishTemplateArgumentDeduction(ConversionTemplate, Deduced, 0,
ConversionSpecialized, Info);
Specialization = cast_or_null<CXXConversionDecl>(ConversionSpecialized);
@@ -3866,19 +3903,19 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
// function to specialize the corresponding call operator.
// e.g., int (*fp)(int) = [](auto a) { return a; };
if (Result == TDK_Success && isLambdaConversionOperator(ConversionGeneric)) {
-
+
// Get the return type of the destination ptr-to-function we are converting
- // to. This is necessary for matching the lambda call operator's return
+ // to. This is necessary for matching the lambda call operator's return
// type to that of the destination ptr-to-function's return type.
- assert(A->isPointerType() &&
+ assert(A->isPointerType() &&
"Can only convert from lambda to ptr-to-function");
- const FunctionType *ToFunType =
+ const FunctionType *ToFunType =
A->getPointeeType().getTypePtr()->getAs<FunctionType>();
const QualType DestFunctionPtrReturnType = ToFunType->getReturnType();
- // Create the corresponding specializations of the call operator and
- // the static-invoker; and if the return type is auto,
- // deduce the return type and check if it matches the
+ // Create the corresponding specializations of the call operator and
+ // the static-invoker; and if the return type is auto,
+ // deduce the return type and check if it matches the
// DestFunctionPtrReturnType.
// For instance:
// auto L = [](auto a) { return f(a); };
@@ -3886,7 +3923,7 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
// char (*fp2)(int) = L; <-- Not OK.
Result = SpecializeCorrespondingLambdaCallOperatorAndInvoker(
- Specialization, Deduced, DestFunctionPtrReturnType,
+ Specialization, Deduced, DestFunctionPtrReturnType,
Info, *this);
}
return Result;
@@ -3908,16 +3945,22 @@ Sema::DeduceTemplateArguments(FunctionTemplateDecl *ConversionTemplate,
/// \param Info the argument will be updated to provide additional information
/// about template argument deduction.
///
+/// \param IsAddressOfFunction If \c true, we are deducing as part of taking
+/// the address of a function template in a context where we do not have a
+/// target type, per [over.over]. If \c false, we are looking up a function
+/// template specialization based on its signature, which only happens when
+/// deducing a function parameter type from an argument that is a template-id
+/// naming a function template specialization.
+///
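+/// A minimal sketch (declarations invented): '&f' with explicit template
+/// arguments and no target type takes this path with IsAddressOfFunction
+/// set; deducing a parameter from an argument written as 'f<int>' does not.
+///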
/// \returns the result of template argument deduction.
-Sema::TemplateDeductionResult
-Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
- TemplateArgumentListInfo *ExplicitTemplateArgs,
- FunctionDecl *&Specialization,
- TemplateDeductionInfo &Info,
- bool InOverloadResolution) {
+Sema::TemplateDeductionResult Sema::DeduceTemplateArguments(
+ FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ FunctionDecl *&Specialization, TemplateDeductionInfo &Info,
+ bool IsAddressOfFunction) {
return DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs,
QualType(), Specialization, Info,
- InOverloadResolution);
+ IsAddressOfFunction);
}
namespace {
@@ -3926,10 +3969,12 @@ namespace {
class SubstituteAutoTransform :
public TreeTransform<SubstituteAutoTransform> {
QualType Replacement;
+ bool UseAutoSugar;
public:
- SubstituteAutoTransform(Sema &SemaRef, QualType Replacement)
+ SubstituteAutoTransform(Sema &SemaRef, QualType Replacement,
+ bool UseAutoSugar = true)
: TreeTransform<SubstituteAutoTransform>(SemaRef),
- Replacement(Replacement) {}
+ Replacement(Replacement), UseAutoSugar(UseAutoSugar) {}
QualType TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) {
// If we're building the type pattern to deduce against, don't wrap the
@@ -3939,19 +3984,17 @@ namespace {
// auto &&lref = lvalue;
// must transform into "rvalue reference to T" not "rvalue reference to
// auto type deduced as T" in order for [temp.deduct.call]p3 to apply.
- if (!Replacement.isNull() && isa<TemplateTypeParmType>(Replacement)) {
+ if (!UseAutoSugar) {
+ assert(isa<TemplateTypeParmType>(Replacement) &&
+ "unexpected unsugared replacement kind");
QualType Result = Replacement;
TemplateTypeParmTypeLoc NewTL =
TLB.push<TemplateTypeParmTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
} else {
- bool Dependent =
- !Replacement.isNull() && Replacement->isDependentType();
- QualType Result =
- SemaRef.Context.getAutoType(Dependent ? QualType() : Replacement,
- TL.getTypePtr()->getKeyword(),
- Dependent);
+ QualType Result = SemaRef.Context.getAutoType(
+ Replacement, TL.getTypePtr()->getKeyword(), Replacement.isNull());
AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
NewTL.setNameLoc(TL.getNameLoc());
return Result;
@@ -3974,18 +4017,29 @@ namespace {
}
Sema::DeduceAutoResult
-Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result) {
- return DeduceAutoType(Type->getTypeLoc(), Init, Result);
+Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init, QualType &Result,
+ Optional<unsigned> DependentDeductionDepth) {
+ return DeduceAutoType(Type->getTypeLoc(), Init, Result,
+ DependentDeductionDepth);
}
/// \brief Deduce the type for an auto type-specifier (C++11 [dcl.spec.auto]p6)
///
+/// Note that this is done even if the initializer is dependent. (This is
+/// necessary to support partial ordering of templates using 'auto'.)
+/// A dependent type will be produced when deducing from a dependent type.
+///
/// \param Type the type pattern using the auto type-specifier.
/// \param Init the initializer for the variable whose type is to be deduced.
/// \param Result if type deduction was successful, this will be set to the
/// deduced type.
+/// \param DependentDeductionDepth Set if we should permit deduction in
+/// dependent cases. This is necessary for template partial ordering with
+/// 'auto' template parameters. The value specified is the template
+/// parameter depth at which we should perform 'auto' deduction.
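+///
+/// A brief sketch of the dependent case (illustrative):
+///   template<auto V> struct A;
+///   template<int N> struct A<N>;  // partial ordering deduces 'auto' from
+///                                 // the dependent 'N' at this depth.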
Sema::DeduceAutoResult
-Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
+Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
+ Optional<unsigned> DependentDeductionDepth) {
if (Init->getType()->isNonOverloadPlaceholderType()) {
ExprResult NonPlaceholder = CheckPlaceholderExpr(Init);
if (NonPlaceholder.isInvalid())
@@ -3993,12 +4047,16 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
Init = NonPlaceholder.get();
}
- if (Init->isTypeDependent() || Type.getType()->isDependentType()) {
- Result = SubstituteAutoTransform(*this, Context.DependentTy).Apply(Type);
+ if (!DependentDeductionDepth &&
+ (Type.getType()->isDependentType() || Init->isTypeDependent())) {
+ Result = SubstituteAutoTransform(*this, QualType()).Apply(Type);
assert(!Result.isNull() && "substituting DependentTy can't fail");
return DAR_Succeeded;
}
+ // Find the depth of template parameter to synthesize.
+  // Find the depth of the template parameter to synthesize.
+
// If this is a 'decltype(auto)' specifier, do the decltype dance.
// Since 'decltype(auto)' can only occur at the top of the type, we
// don't need to go digging for it.
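  // e.g. (illustrative): 'decltype(auto) x = (v);' yields a reference type,
  // while plain 'auto x = (v);' deduces by value.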
@@ -4031,15 +4089,16 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
LocalInstantiationScope InstScope(*this);
// Build template<class TemplParam> void Func(FuncParam);
- TemplateTypeParmDecl *TemplParam =
- TemplateTypeParmDecl::Create(Context, nullptr, SourceLocation(), Loc, 0, 0,
- nullptr, false, false);
+ TemplateTypeParmDecl *TemplParam = TemplateTypeParmDecl::Create(
+ Context, nullptr, SourceLocation(), Loc, Depth, 0, nullptr, false, false);
QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0);
NamedDecl *TemplParamPtr = TemplParam;
- FixedSizeTemplateParameterListStorage<1> TemplateParamsSt(
- Loc, Loc, TemplParamPtr, Loc);
+ FixedSizeTemplateParameterListStorage<1, false> TemplateParamsSt(
+ Loc, Loc, TemplParamPtr, Loc, nullptr);
- QualType FuncParam = SubstituteAutoTransform(*this, TemplArg).Apply(Type);
+ QualType FuncParam =
+ SubstituteAutoTransform(*this, TemplArg, /*UseAutoSugar*/false)
+ .Apply(Type);
assert(!FuncParam.isNull() &&
"substituting template parameter for 'auto' failed");
@@ -4049,7 +4108,18 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
QualType InitType = Init->getType();
unsigned TDF = 0;
- TemplateDeductionInfo Info(Loc);
+ TemplateDeductionInfo Info(Loc, Depth);
+
+ // If deduction failed, don't diagnose if the initializer is dependent; it
+ // might acquire a matching type in the instantiation.
+ auto DeductionFailed = [&]() -> DeduceAutoResult {
+ if (Init->isTypeDependent()) {
+ Result = SubstituteAutoTransform(*this, QualType()).Apply(Type);
+ assert(!Result.isNull() && "substituting DependentTy can't fail");
+ return DAR_Succeeded;
+ }
+ return DAR_Failed;
+ };
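+
+  // In short (sketch): with DependentDeductionDepth set, a type-dependent
+  // initializer defers deduction to instantiation instead of failing now.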
InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
if (InitList) {
@@ -4057,7 +4127,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
if (DeduceTemplateArgumentByListElement(*this, TemplateParamsSt.get(),
TemplArg, InitList->getInit(i),
Info, Deduced, TDF))
- return DAR_Failed;
+ return DeductionFailed();
}
} else {
if (!getLangOpts().CPlusPlus && Init->refersToBitField()) {
@@ -4072,11 +4142,12 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
if (DeduceTemplateArgumentsByTypeMatch(*this, TemplateParamsSt.get(),
FuncParam, InitType, Info, Deduced,
TDF))
- return DAR_Failed;
+ return DeductionFailed();
}
+ // Could be null if somehow 'auto' appears in a non-deduced context.
if (Deduced[0].getKind() != TemplateArgument::Type)
- return DAR_Failed;
+ return DeductionFailed();
QualType DeducedType = Deduced[0].getAsType();
@@ -4088,7 +4159,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
Result = SubstituteAutoTransform(*this, DeducedType).Apply(Type);
if (Result.isNull())
- return DAR_FailedAlreadyDiagnosed;
+ return DAR_FailedAlreadyDiagnosed;
// Check that the deduced argument type is compatible with the original
// argument type per C++ [temp.deduct.call]p4.
@@ -4097,22 +4168,26 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result) {
Sema::OriginalCallArg(FuncParam,0,InitType),
Result)) {
Result = QualType();
- return DAR_Failed;
+ return DeductionFailed();
}
return DAR_Succeeded;
}
-QualType Sema::SubstAutoType(QualType TypeWithAuto,
+QualType Sema::SubstAutoType(QualType TypeWithAuto,
QualType TypeToReplaceAuto) {
- return SubstituteAutoTransform(*this, TypeToReplaceAuto).
- TransformType(TypeWithAuto);
+ if (TypeToReplaceAuto->isDependentType())
+ TypeToReplaceAuto = QualType();
+ return SubstituteAutoTransform(*this, TypeToReplaceAuto)
+ .TransformType(TypeWithAuto);
}
-TypeSourceInfo* Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
+TypeSourceInfo* Sema::SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType TypeToReplaceAuto) {
- return SubstituteAutoTransform(*this, TypeToReplaceAuto).
- TransformType(TypeWithAuto);
+ if (TypeToReplaceAuto->isDependentType())
+ TypeToReplaceAuto = QualType();
+ return SubstituteAutoTransform(*this, TypeToReplaceAuto)
+ .TransformType(TypeWithAuto);
}
void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) {
@@ -4284,6 +4359,10 @@ static bool isAtLeastAsSpecializedAs(Sema &S,
if (Deduced[ArgIdx].isNull())
break;
+ // FIXME: We fail to implement [temp.deduct.type]p1 along this path. We need
+ // to substitute the deduced arguments back into the template and check that
+ // we get the right type.
+
if (ArgIdx == NumArgs) {
// All template arguments were deduced. FT1 is at least as specialized
// as FT2.
@@ -4487,12 +4566,12 @@ UnresolvedSetIterator Sema::getMostSpecialized(
// FIXME: Can we order the candidates in some sane way?
for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) {
PartialDiagnostic PD = CandidateDiag;
- PD << getTemplateArgumentBindingsText(
- cast<FunctionDecl>(*I)->getPrimaryTemplate()->getTemplateParameters(),
- *cast<FunctionDecl>(*I)->getTemplateSpecializationArgs());
+ const auto *FD = cast<FunctionDecl>(*I);
+ PD << FD << getTemplateArgumentBindingsText(
+ FD->getPrimaryTemplate()->getTemplateParameters(),
+ *FD->getTemplateSpecializationArgs());
if (!TargetType.isNull())
- HandleFunctionTypeMismatch(PD, cast<FunctionDecl>(*I)->getType(),
- TargetType);
+ HandleFunctionTypeMismatch(PD, FD->getType(), TargetType);
Diag((*I)->getLocation(), PD);
}
}
@@ -4500,21 +4579,17 @@ UnresolvedSetIterator Sema::getMostSpecialized(
return SpecEnd;
}
-/// \brief Returns the more specialized class template partial specialization
-/// according to the rules of partial ordering of class template partial
-/// specializations (C++ [temp.class.order]).
-///
-/// \param PS1 the first class template partial specialization
+/// Determine whether one partial specialization, P1, is at least as
+/// specialized as another, P2.
///
-/// \param PS2 the second class template partial specialization
-///
-/// \returns the more specialized class template partial specialization. If
-/// neither partial specialization is more specialized, returns NULL.
-ClassTemplatePartialSpecializationDecl *
-Sema::getMoreSpecializedPartialSpecialization(
- ClassTemplatePartialSpecializationDecl *PS1,
- ClassTemplatePartialSpecializationDecl *PS2,
- SourceLocation Loc) {
+/// \tparam TemplateLikeDecl The kind of P2, which must be a
+/// TemplateDecl or {Class,Var}TemplatePartialSpecializationDecl.
+/// \param T1 The injected-class-name of P1 (faked for a variable template).
+/// \param T2 The injected-class-name of P2 (faked for a variable template).
+template<typename TemplateLikeDecl>
+static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
+ TemplateLikeDecl *P2,
+ TemplateDeductionInfo &Info) {
// C++ [temp.class.order]p1:
// For two class template partial specializations, the first is at least as
// specialized as the second if, given the following rewrite to two
@@ -4540,37 +4615,50 @@ Sema::getMoreSpecializedPartialSpecialization(
// template partial specialization's template arguments, for
// example.
SmallVector<DeducedTemplateArgument, 4> Deduced;
- TemplateDeductionInfo Info(Loc);
+ // Determine whether P1 is at least as specialized as P2.
+ Deduced.resize(P2->getTemplateParameters()->size());
+ if (DeduceTemplateArgumentsByTypeMatch(S, P2->getTemplateParameters(),
+ T2, T1, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true))
+ return false;
+
+ SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
+ Deduced.end());
+ Sema::InstantiatingTemplate Inst(S, Info.getLocation(), P2, DeducedArgs,
+ Info);
+ auto *TST1 = T1->castAs<TemplateSpecializationType>();
+ if (FinishTemplateArgumentDeduction(
+ S, P2, /*PartialOrdering=*/true,
+ TemplateArgumentList(TemplateArgumentList::OnStack,
+ TST1->template_arguments()),
+ Deduced, Info))
+ return false;
+
+ return true;
+}
+
+/// \brief Returns the more specialized class template partial specialization
+/// according to the rules of partial ordering of class template partial
+/// specializations (C++ [temp.class.order]).
+///
+/// \param PS1 the first class template partial specialization
+///
+/// \param PS2 the second class template partial specialization
+///
+/// \returns the more specialized class template partial specialization. If
+/// neither partial specialization is more specialized, returns NULL.
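+///
+/// For example (illustrative):
+///   template<typename T> struct X;        // primary
+///   template<typename T> struct X<T*>;    // PS1
+///   template<typename T> struct X<T**>;   // PS2, more specialized than PS1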
+ClassTemplatePartialSpecializationDecl *
+Sema::getMoreSpecializedPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *PS1,
+ ClassTemplatePartialSpecializationDecl *PS2,
+ SourceLocation Loc) {
QualType PT1 = PS1->getInjectedSpecializationType();
QualType PT2 = PS2->getInjectedSpecializationType();
- // Determine whether PS1 is at least as specialized as PS2
- Deduced.resize(PS2->getTemplateParameters()->size());
- bool Better1 = !DeduceTemplateArgumentsByTypeMatch(*this,
- PS2->getTemplateParameters(),
- PT2, PT1, Info, Deduced, TDF_None,
- /*PartialOrdering=*/true);
- if (Better1) {
- SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end());
- InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info);
- Better1 = !::FinishTemplateArgumentDeduction(
- *this, PS2, PS1->getTemplateArgs(), Deduced, Info);
- }
-
- // Determine whether PS2 is at least as specialized as PS1
- Deduced.clear();
- Deduced.resize(PS1->getTemplateParameters()->size());
- bool Better2 = !DeduceTemplateArgumentsByTypeMatch(
- *this, PS1->getTemplateParameters(), PT1, PT2, Info, Deduced, TDF_None,
- /*PartialOrdering=*/true);
- if (Better2) {
- SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
- Deduced.end());
- InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info);
- Better2 = !::FinishTemplateArgumentDeduction(
- *this, PS1, PS2->getTemplateArgs(), Deduced, Info);
- }
+ TemplateDeductionInfo Info(Loc);
+ bool Better1 = isAtLeastAsSpecializedAs(*this, PT1, PT2, PS2, Info);
+ bool Better2 = isAtLeastAsSpecializedAs(*this, PT2, PT1, PS1, Info);
if (Better1 == Better2)
return nullptr;
@@ -4578,18 +4666,26 @@ Sema::getMoreSpecializedPartialSpecialization(
return Better1 ? PS1 : PS2;
}
-/// TODO: Unify with ClassTemplatePartialSpecializationDecl version?
-/// May require unifying ClassTemplate(Partial)SpecializationDecl and
-/// VarTemplate(Partial)SpecializationDecl with a new data
-/// structure Template(Partial)SpecializationDecl, and
-/// using Template(Partial)SpecializationDecl as input type.
+bool Sema::isMoreSpecializedThanPrimary(
+ ClassTemplatePartialSpecializationDecl *Spec, TemplateDeductionInfo &Info) {
+ ClassTemplateDecl *Primary = Spec->getSpecializedTemplate();
+ QualType PrimaryT = Primary->getInjectedClassNameSpecialization();
+ QualType PartialT = Spec->getInjectedSpecializationType();
+ if (!isAtLeastAsSpecializedAs(*this, PartialT, PrimaryT, Primary, Info))
+ return false;
+ if (isAtLeastAsSpecializedAs(*this, PrimaryT, PartialT, Spec, Info)) {
+ Info.clearSFINAEDiagnostic();
+ return false;
+ }
+ return true;
+}
+
VarTemplatePartialSpecializationDecl *
Sema::getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc) {
- SmallVector<DeducedTemplateArgument, 4> Deduced;
- TemplateDeductionInfo Info(Loc);
-
+ // Pretend the variable template specializations are class template
+ // specializations and form a fake injected class name type for comparison.
assert(PS1->getSpecializedTemplate() == PS2->getSpecializedTemplate() &&
"the partial specializations being compared should specialize"
" the same template.");
@@ -4600,39 +4696,101 @@ Sema::getMoreSpecializedPartialSpecialization(
QualType PT2 = Context.getTemplateSpecializationType(
CanonTemplate, PS2->getTemplateArgs().asArray());
- // Determine whether PS1 is at least as specialized as PS2
- Deduced.resize(PS2->getTemplateParameters()->size());
- bool Better1 = !DeduceTemplateArgumentsByTypeMatch(
- *this, PS2->getTemplateParameters(), PT2, PT1, Info, Deduced, TDF_None,
- /*PartialOrdering=*/true);
- if (Better1) {
- SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),
- Deduced.end());
- InstantiatingTemplate Inst(*this, Loc, PS2, DeducedArgs, Info);
- Better1 = !::FinishTemplateArgumentDeduction(*this, PS2,
- PS1->getTemplateArgs(),
- Deduced, Info);
+ TemplateDeductionInfo Info(Loc);
+ bool Better1 = isAtLeastAsSpecializedAs(*this, PT1, PT2, PS2, Info);
+ bool Better2 = isAtLeastAsSpecializedAs(*this, PT2, PT1, PS1, Info);
+
+ if (Better1 == Better2)
+ return nullptr;
+
+ return Better1 ? PS1 : PS2;
+}
+
+bool Sema::isMoreSpecializedThanPrimary(
+ VarTemplatePartialSpecializationDecl *Spec, TemplateDeductionInfo &Info) {
+ TemplateDecl *Primary = Spec->getSpecializedTemplate();
+ // FIXME: Cache the injected template arguments rather than recomputing
+ // them for each partial specialization.
+ SmallVector<TemplateArgument, 8> PrimaryArgs;
+ Context.getInjectedTemplateArgs(Primary->getTemplateParameters(),
+ PrimaryArgs);
+
+ TemplateName CanonTemplate =
+ Context.getCanonicalTemplateName(TemplateName(Primary));
+ QualType PrimaryT = Context.getTemplateSpecializationType(
+ CanonTemplate, PrimaryArgs);
+ QualType PartialT = Context.getTemplateSpecializationType(
+ CanonTemplate, Spec->getTemplateArgs().asArray());
+ if (!isAtLeastAsSpecializedAs(*this, PartialT, PrimaryT, Primary, Info))
+ return false;
+ if (isAtLeastAsSpecializedAs(*this, PrimaryT, PartialT, Spec, Info)) {
+ Info.clearSFINAEDiagnostic();
+ return false;
}
+ return true;
+}
- // Determine whether PS2 is at least as specialized as PS1
- Deduced.clear();
- Deduced.resize(PS1->getTemplateParameters()->size());
- bool Better2 = !DeduceTemplateArgumentsByTypeMatch(*this,
- PS1->getTemplateParameters(),
- PT1, PT2, Info, Deduced, TDF_None,
- /*PartialOrdering=*/true);
- if (Better2) {
- SmallVector<TemplateArgument, 4> DeducedArgs(Deduced.begin(),Deduced.end());
- InstantiatingTemplate Inst(*this, Loc, PS1, DeducedArgs, Info);
- Better2 = !::FinishTemplateArgumentDeduction(*this, PS1,
- PS2->getTemplateArgs(),
- Deduced, Info);
+bool Sema::isTemplateTemplateParameterAtLeastAsSpecializedAs(
+ TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc) {
+ // C++1z [temp.arg.template]p4: (DR 150)
+ // A template template-parameter P is at least as specialized as a
+ // template template-argument A if, given the following rewrite to two
+ // function templates...
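+  //
+  // e.g. (illustrative, relying on the DR150 relaxation):
+  //   template<typename T, typename U = int> struct S;
+  //   template<template<typename> class TT> void f();
+  //   f<S>();  // OK iff TT is at least as specialized as S; S's defaulted
+  //            // second parameter makes the rewrite succeed.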
+
+ // Rather than synthesize function templates, we merely perform the
+ // equivalent partial ordering by performing deduction directly on
+ // the template parameter lists of the template template parameters.
+ //
+ // Given an invented class template X with the template parameter list of
+ // A (including default arguments):
+ TemplateName X = Context.getCanonicalTemplateName(TemplateName(AArg));
+ TemplateParameterList *A = AArg->getTemplateParameters();
+
+ // - Each function template has a single function parameter whose type is
+ // a specialization of X with template arguments corresponding to the
+ // template parameters from the respective function template
+ SmallVector<TemplateArgument, 8> AArgs;
+ Context.getInjectedTemplateArgs(A, AArgs);
+
+ // Check P's arguments against A's parameter list. This will fill in default
+ // template arguments as needed. AArgs are already correct by construction.
+ // We can't just use CheckTemplateIdType because that will expand alias
+ // templates.
+ SmallVector<TemplateArgument, 4> PArgs;
+ {
+ SFINAETrap Trap(*this);
+
+ Context.getInjectedTemplateArgs(P, PArgs);
+ TemplateArgumentListInfo PArgList(P->getLAngleLoc(), P->getRAngleLoc());
+ for (unsigned I = 0, N = P->size(); I != N; ++I) {
+ // Unwrap packs that getInjectedTemplateArgs wrapped around pack
+ // expansions, to form an "as written" argument list.
+ TemplateArgument Arg = PArgs[I];
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ assert(Arg.pack_size() == 1 && Arg.pack_begin()->isPackExpansion());
+ Arg = *Arg.pack_begin();
+ }
+ PArgList.addArgument(getTrivialTemplateArgumentLoc(
+ Arg, QualType(), P->getParam(I)->getLocation()));
+ }
+ PArgs.clear();
+
+ // C++1z [temp.arg.template]p3:
+ // If the rewrite produces an invalid type, then P is not at least as
+ // specialized as A.
+ if (CheckTemplateArgumentList(AArg, Loc, PArgList, false, PArgs) ||
+ Trap.hasErrorOccurred())
+ return false;
}
- if (Better1 == Better2)
- return nullptr;
+ QualType AType = Context.getTemplateSpecializationType(X, AArgs);
+ QualType PType = Context.getTemplateSpecializationType(X, PArgs);
- return Better1? PS1 : PS2;
+ // ... the function template corresponding to P is at least as specialized
+ // as the function template corresponding to A according to the partial
+ // ordering rules for function templates.
+ TemplateDeductionInfo Info(Loc, A->getDepth());
+ return isAtLeastAsSpecializedAs(*this, PType, AType, AArg, Info);
}
static void
@@ -4679,6 +4837,11 @@ MarkUsedTemplateParameters(ASTContext &Ctx,
if (NTTP->getDepth() == Depth)
Used[NTTP->getIndex()] = true;
+
+ // In C++1z mode, additional arguments may be deduced from the type of a
+ // non-type argument.
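+  // For instance (sketch): in 'template<typename T, T V> struct A;',
+  // deducing V also lets T be deduced from V's type, so mark T as used.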
+ if (Ctx.getLangOpts().CPlusPlus1z)
+ MarkUsedTemplateParameters(Ctx, NTTP->getType(), OnlyDeduced, Depth, Used);
}
/// \brief Mark the template parameters that are used by the given
@@ -4846,7 +5009,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
// not the last template argument, the entire template argument list is a
// non-deduced context.
if (OnlyDeduced &&
- hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
+ hasPackExpansionBeforeEnd(Spec->template_arguments()))
break;
for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
@@ -4925,7 +5088,7 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
case Type::UnaryTransform:
if (!OnlyDeduced)
MarkUsedTemplateParameters(Ctx,
- cast<UnaryTransformType>(T)->getUnderlyingType(),
+ cast<UnaryTransformType>(T)->getUnderlyingType(),
OnlyDeduced, Depth, Used);
break;
@@ -5021,7 +5184,7 @@ Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
// the last template argument, the entire template argument list is a
// non-deduced context.
if (OnlyDeduced &&
- hasPackExpansionBeforeEnd(TemplateArgs.data(), TemplateArgs.size()))
+ hasPackExpansionBeforeEnd(TemplateArgs.asArray()))
return;
for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
@@ -5054,7 +5217,7 @@ bool hasDeducibleTemplateParameters(Sema &S,
TemplateParameterList *TemplateParams
= FunctionTemplate->getTemplateParameters();
llvm::SmallBitVector Deduced(TemplateParams->size());
- ::MarkUsedTemplateParameters(S.Context, T, true, TemplateParams->getDepth(),
+ ::MarkUsedTemplateParameters(S.Context, T, true, TemplateParams->getDepth(),
Deduced);
return Deduced.any();
diff --git a/lib/Sema/SemaTemplateInstantiate.cpp b/lib/Sema/SemaTemplateInstantiate.cpp
index 65a5633bf0d5..160c9f090788 100644
--- a/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/lib/Sema/SemaTemplateInstantiate.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
+#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/LangOptions.h"
@@ -208,9 +209,11 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
sema::TemplateDeductionInfo *DeductionInfo)
: SemaRef(SemaRef), SavedInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext) {
- // Don't allow further instantiation if a fatal error has occcured. Any
- // diagnostics we might have raised will not be visible.
- if (SemaRef.Diags.hasFatalErrorOccurred()) {
+ // Don't allow further instantiation if a fatal error and an uncompilable
+ // error have occurred. Any diagnostics we might have raised will not be
+ // visible, and we do not need to construct a correct AST.
+ if (SemaRef.Diags.hasFatalErrorOccurred() &&
+ SemaRef.Diags.hasUncompilableErrorOccurred()) {
Invalid = true;
return;
}
@@ -276,6 +279,17 @@ Sema::InstantiatingTemplate::InstantiatingTemplate(
Sema::InstantiatingTemplate::InstantiatingTemplate(
Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
+ : InstantiatingTemplate(
+ SemaRef,
+ ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+ PointOfInstantiation, InstantiationRange, Template, nullptr,
+ TemplateArgs, &DeductionInfo) {}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(
+ Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange)
@@ -420,8 +434,7 @@ void Sema::PrintInstantiationStack() {
if (isa<ClassTemplateSpecializationDecl>(Record))
DiagID = diag::note_template_class_instantiation_here;
Diags.Report(Active->PointOfInstantiation, DiagID)
- << Context.getTypeDeclType(Record)
- << Active->InstantiationRange;
+ << Record << Active->InstantiationRange;
} else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
unsigned DiagID;
if (Function->getPrimaryTemplate())
@@ -482,29 +495,43 @@ void Sema::PrintInstantiationStack() {
break;
}
- case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
- if (ClassTemplatePartialSpecializationDecl *PartialSpec =
- dyn_cast<ClassTemplatePartialSpecializationDecl>(Active->Entity)) {
+ case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution: {
+ if (FunctionTemplateDecl *FnTmpl =
+ dyn_cast<FunctionTemplateDecl>(Active->Entity)) {
Diags.Report(Active->PointOfInstantiation,
- diag::note_partial_spec_deduct_instantiation_here)
- << Context.getTypeDeclType(PartialSpec)
- << getTemplateArgumentBindingsText(
- PartialSpec->getTemplateParameters(),
+ diag::note_function_template_deduction_instantiation_here)
+ << FnTmpl
+ << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
Active->TemplateArgs,
Active->NumTemplateArgs)
<< Active->InstantiationRange;
} else {
- FunctionTemplateDecl *FnTmpl
- = cast<FunctionTemplateDecl>(Active->Entity);
+ bool IsVar = isa<VarTemplateDecl>(Active->Entity) ||
+ isa<VarTemplateSpecializationDecl>(Active->Entity);
+ bool IsTemplate = false;
+ TemplateParameterList *Params;
+ if (auto *D = dyn_cast<TemplateDecl>(Active->Entity)) {
+ IsTemplate = true;
+ Params = D->getTemplateParameters();
+ } else if (auto *D = dyn_cast<ClassTemplatePartialSpecializationDecl>(
+ Active->Entity)) {
+ Params = D->getTemplateParameters();
+ } else if (auto *D = dyn_cast<VarTemplatePartialSpecializationDecl>(
+ Active->Entity)) {
+ Params = D->getTemplateParameters();
+ } else {
+ llvm_unreachable("unexpected template kind");
+ }
+
Diags.Report(Active->PointOfInstantiation,
- diag::note_function_template_deduction_instantiation_here)
- << FnTmpl
- << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
- Active->TemplateArgs,
+ diag::note_deduced_template_arg_substitution_here)
+ << IsVar << IsTemplate << cast<NamedDecl>(Active->Entity)
+ << getTemplateArgumentBindingsText(Params, Active->TemplateArgs,
Active->NumTemplateArgs)
<< Active->InstantiationRange;
}
break;
+ }
case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation: {
ParmVarDecl *Param = cast<ParmVarDecl>(Active->Entity);
@@ -1178,8 +1205,8 @@ ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
cast<PackExpansionType>(parm->getType())->getPattern(),
TemplateArgs, loc, parm->getDeclName());
} else {
- type = SemaRef.SubstType(parm->getType(), TemplateArgs,
- loc, parm->getDeclName());
+ type = SemaRef.SubstType(VD ? arg.getParamTypeForDecl() : arg.getNullPtrType(),
+ TemplateArgs, loc, parm->getDeclName());
}
assert(!type.isNull() && "type substitution failed for param type");
assert(!type->isDependentType() && "param type still dependent");
@@ -1684,7 +1711,7 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
// Instantiate default arguments for methods of local classes (DR1484)
// and non-defining declarations.
Sema::ContextRAII SavedContext(*this, OwningFunc);
- LocalInstantiationScope Local(*this);
+ LocalInstantiationScope Local(*this, true);
ExprResult NewArg = SubstExpr(Arg, TemplateArgs);
if (NewArg.isUsable()) {
// It would be nice if we still had this.
@@ -1858,62 +1885,6 @@ namespace clang {
}
}
-/// Determine whether we would be unable to instantiate this template (because
-/// it either has no definition, or is in the process of being instantiated).
-static bool DiagnoseUninstantiableTemplate(Sema &S,
- SourceLocation PointOfInstantiation,
- TagDecl *Instantiation,
- bool InstantiatedFromMember,
- TagDecl *Pattern,
- TagDecl *PatternDef,
- TemplateSpecializationKind TSK,
- bool Complain = true) {
- if (PatternDef && !PatternDef->isBeingDefined()) {
- NamedDecl *SuggestedDef = nullptr;
- if (!S.hasVisibleDefinition(PatternDef, &SuggestedDef,
- /*OnlyNeedComplete*/false)) {
- // If we're allowed to diagnose this and recover, do so.
- bool Recover = Complain && !S.isSFINAEContext();
- if (Complain)
- S.diagnoseMissingImport(PointOfInstantiation, SuggestedDef,
- Sema::MissingImportKind::Definition, Recover);
- return !Recover;
- }
- return false;
- }
-
- if (!Complain || (PatternDef && PatternDef->isInvalidDecl())) {
- // Say nothing
- } else if (PatternDef) {
- assert(PatternDef->isBeingDefined());
- S.Diag(PointOfInstantiation,
- diag::err_template_instantiate_within_definition)
- << (TSK != TSK_ImplicitInstantiation)
- << S.Context.getTypeDeclType(Instantiation);
- // Not much point in noting the template declaration here, since
- // we're lexically inside it.
- Instantiation->setInvalidDecl();
- } else if (InstantiatedFromMember) {
- S.Diag(PointOfInstantiation,
- diag::err_implicit_instantiate_member_undefined)
- << S.Context.getTypeDeclType(Instantiation);
- S.Diag(Pattern->getLocation(), diag::note_member_declared_at);
- } else {
- S.Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
- << (TSK != TSK_ImplicitInstantiation)
- << S.Context.getTypeDeclType(Instantiation);
- S.Diag(Pattern->getLocation(), diag::note_template_decl_here);
- }
-
- // In general, Instantiation isn't marked invalid to get more than one
- // error for multiple undefined instantiations. But the code that does
- // explicit declaration -> explicit definition conversion can't handle
- // invalid declarations, so mark as invalid in that case.
- if (TSK == TSK_ExplicitInstantiationDeclaration)
- Instantiation->setInvalidDecl();
- return true;
-}
-
/// \brief Instantiate the definition of a class from a given pattern.
///
/// \param PointOfInstantiation The point of instantiation within the
@@ -1944,7 +1915,7 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
bool Complain) {
CXXRecordDecl *PatternDef
= cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
- if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
Instantiation->getInstantiatedFromMemberClass(),
Pattern, PatternDef, TSK, Complain))
return true;
@@ -2174,7 +2145,7 @@ bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK) {
EnumDecl *PatternDef = Pattern->getDefinition();
- if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Instantiation,
Instantiation->getInstantiatedFromMemberEnum(),
Pattern, PatternDef, TSK,/*Complain*/true))
return true;
@@ -2251,14 +2222,10 @@ bool Sema::InstantiateInClassInitializer(
if (!OldInit) {
RecordDecl *PatternRD = Pattern->getParent();
RecordDecl *OutermostClass = PatternRD->getOuterLexicalRecordContext();
- if (OutermostClass == PatternRD) {
- Diag(Pattern->getLocEnd(), diag::err_in_class_initializer_not_yet_parsed)
- << PatternRD << Pattern;
- } else {
- Diag(Pattern->getLocEnd(),
- diag::err_in_class_initializer_not_yet_parsed_outer_class)
- << PatternRD << OutermostClass << Pattern;
- }
+ Diag(PointOfInstantiation,
+ diag::err_in_class_initializer_not_yet_parsed)
+ << OutermostClass << Pattern;
+ Diag(Pattern->getLocEnd(), diag::note_in_class_initializer_not_yet_parsed);
Instantiation->setInvalidDecl();
return true;
}
@@ -2294,6 +2261,9 @@ bool Sema::InstantiateInClassInitializer(
ActOnFinishCXXInClassMemberInitializer(
Instantiation, Init ? Init->getLocStart() : SourceLocation(), Init);
+ if (auto *L = getASTMutationListener())
+ L->DefaultMemberInitializerInstantiated(Instantiation);
+
// Exit the scope of this instantiation.
SavedContext.pop();
diff --git a/lib/Sema/SemaTemplateInstantiateDecl.cpp b/lib/Sema/SemaTemplateInstantiateDecl.cpp
index dd3748fb5337..7328dcb8760f 100644
--- a/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/PrettyDeclStackTrace.h"
#include "clang/Sema/Template.h"
@@ -178,7 +179,7 @@ static void instantiateDependentEnableIfAttr(
return;
Cond = Result.getAs<Expr>();
}
- if (A->getCond()->isTypeDependent() && !Cond->isTypeDependent()) {
+ if (!Cond->isTypeDependent()) {
ExprResult Converted = S.PerformContextuallyConvertToBool(Cond);
if (Converted.isInvalid())
return;
@@ -187,7 +188,7 @@ static void instantiateDependentEnableIfAttr(
SmallVector<PartialDiagnosticAt, 8> Diags;
if (A->getCond()->isValueDependent() && !Cond->isValueDependent() &&
- !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(Tmpl),
+ !Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(New),
Diags)) {
S.Diag(A->getLocation(), diag::err_enable_if_never_constant_expr);
for (int I = 0, N = Diags.size(); I != N; ++I)
@@ -331,8 +332,7 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
- const EnableIfAttr *EnableIf = dyn_cast<EnableIfAttr>(TmplAttr);
- if (EnableIf && EnableIf->getCond()->isValueDependent()) {
+ if (const auto *EnableIf = dyn_cast<EnableIfAttr>(TmplAttr)) {
instantiateDependentEnableIfAttr(*this, TemplateArgs, EnableIf, Tmpl,
New);
continue;
@@ -598,12 +598,37 @@ TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
return Inst;
}
+Decl *TemplateDeclInstantiator::VisitBindingDecl(BindingDecl *D) {
+ auto *NewBD = BindingDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier());
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewBD);
+ return NewBD;
+}
+
+Decl *TemplateDeclInstantiator::VisitDecompositionDecl(DecompositionDecl *D) {
+ // Transform the bindings first.
+ SmallVector<BindingDecl*, 16> NewBindings;
+ for (auto *OldBD : D->bindings())
+ NewBindings.push_back(cast<BindingDecl>(VisitBindingDecl(OldBD)));
+ ArrayRef<BindingDecl*> NewBindingArray = NewBindings;
+
+ auto *NewDD = cast_or_null<DecompositionDecl>(
+ VisitVarDecl(D, /*InstantiatingVarTemplate=*/false, &NewBindingArray));
+
+ if (!NewDD || NewDD->isInvalidDecl())
+ for (auto *NewBD : NewBindings)
+ NewBD->setInvalidDecl();
+
+ return NewDD;
+}
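+
+// A hedged example of what the two visitors above handle: instantiating
+//   template<typename T> void f(T t) { auto [a, b] = t; }
+// creates the BindingDecls for 'a' and 'b' first, then builds the
+// DecompositionDecl through VisitVarDecl with those bindings attached.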
+
Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
return VisitVarDecl(D, /*InstantiatingVarTemplate=*/false);
}
Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
- bool InstantiatingVarTemplate) {
+ bool InstantiatingVarTemplate,
+ ArrayRef<BindingDecl*> *Bindings) {
// Do substitution on the type of the declaration
TypeSourceInfo *DI = SemaRef.SubstType(D->getTypeSourceInfo(),
@@ -624,9 +649,15 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
SemaRef.adjustContextForLocalExternDecl(DC);
// Build the instantiated declaration.
- VarDecl *Var = VarDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
- D->getLocation(), D->getIdentifier(),
- DI->getType(), DI, D->getStorageClass());
+ VarDecl *Var;
+ if (Bindings)
+ Var = DecompositionDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
+ D->getLocation(), DI->getType(), DI,
+ D->getStorageClass(), *Bindings);
+ else
+ Var = VarDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
+ D->getLocation(), D->getIdentifier(), DI->getType(),
+ DI, D->getStorageClass());
// In ARC, infer 'retaining' for variables of retainable type.
if (SemaRef.getLangOpts().ObjCAutoRefCount &&
@@ -1840,11 +1871,13 @@ TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
Constructor->isExplicit(),
Constructor->isInlineSpecified(),
false, Constructor->isConstexpr());
+ Method->setRangeEnd(Constructor->getLocEnd());
} else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
StartLoc, NameInfo, T, TInfo,
Destructor->isInlineSpecified(),
false);
+ Method->setRangeEnd(Destructor->getLocEnd());
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
Method = CXXConversionDecl::Create(SemaRef.Context, Record,
StartLoc, NameInfo, T, TInfo,
@@ -2052,18 +2085,18 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
ExpandedParameterPackTypes.reserve(D->getNumExpansionTypes());
ExpandedParameterPackTypesAsWritten.reserve(D->getNumExpansionTypes());
for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
- TypeSourceInfo *NewDI =SemaRef.SubstType(D->getExpansionTypeSourceInfo(I),
- TemplateArgs,
- D->getLocation(),
- D->getDeclName());
+ TypeSourceInfo *NewDI =
+ SemaRef.SubstType(D->getExpansionTypeSourceInfo(I), TemplateArgs,
+ D->getLocation(), D->getDeclName());
if (!NewDI)
return nullptr;
- ExpandedParameterPackTypesAsWritten.push_back(NewDI);
- QualType NewT =SemaRef.CheckNonTypeTemplateParameterType(NewDI->getType(),
- D->getLocation());
+ QualType NewT =
+ SemaRef.CheckNonTypeTemplateParameterType(NewDI, D->getLocation());
if (NewT.isNull())
return nullptr;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
ExpandedParameterPackTypes.push_back(NewT);
}
@@ -2103,12 +2136,12 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
if (!NewDI)
return nullptr;
- ExpandedParameterPackTypesAsWritten.push_back(NewDI);
- QualType NewT = SemaRef.CheckNonTypeTemplateParameterType(
- NewDI->getType(),
- D->getLocation());
+ QualType NewT =
+ SemaRef.CheckNonTypeTemplateParameterType(NewDI, D->getLocation());
if (NewT.isNull())
return nullptr;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
ExpandedParameterPackTypes.push_back(NewT);
}
@@ -2128,6 +2161,7 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
if (!NewPattern)
return nullptr;
+ SemaRef.CheckNonTypeTemplateParameterType(NewPattern, D->getLocation());
DI = SemaRef.CheckPackExpansion(NewPattern, Expansion.getEllipsisLoc(),
NumExpansions);
if (!DI)
@@ -2143,8 +2177,7 @@ Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
return nullptr;
// Check that this type is acceptable for a non-type template parameter.
- T = SemaRef.CheckNonTypeTemplateParameterType(DI->getType(),
- D->getLocation());
+ T = SemaRef.CheckNonTypeTemplateParameterType(DI, D->getLocation());
if (T.isNull()) {
T = SemaRef.Context.IntTy;
Invalid = true;
@@ -2397,8 +2430,8 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
}
if (!NewUD->isInvalidDecl() &&
- SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), SS, NameInfo,
- D->getLocation()))
+ SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), D->hasTypename(),
+ SS, NameInfo, D->getLocation()))
NewUD->setInvalidDecl();
SemaRef.Context.setInstantiatedFromUsingDecl(NewUD, D);
@@ -2462,35 +2495,76 @@ Decl *TemplateDeclInstantiator::VisitConstructorUsingShadowDecl(
return nullptr;
}
-Decl * TemplateDeclInstantiator
- ::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
- NestedNameSpecifierLoc QualifierLoc
- = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
- TemplateArgs);
- if (!QualifierLoc)
- return nullptr;
+template <typename T>
+Decl *TemplateDeclInstantiator::instantiateUnresolvedUsingDecl(
+ T *D, bool InstantiatingPackElement) {
+ // If this is a pack expansion, expand it now.
+ if (D->isPackExpansion() && !InstantiatingPackElement) {
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(D->getQualifierLoc(), Unexpanded);
+ SemaRef.collectUnexpandedParameterPacks(D->getNameInfo(), Unexpanded);
- CXXScopeSpec SS;
- SS.Adopt(QualifierLoc);
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ Optional<unsigned> NumExpansions;
+ if (SemaRef.CheckParameterPacksForExpansion(
+ D->getEllipsisLoc(), D->getSourceRange(), Unexpanded, TemplateArgs,
+ Expand, RetainExpansion, NumExpansions))
+ return nullptr;
- // Since NameInfo refers to a typename, it cannot be a C++ special name.
- // Hence, no transformation is required for it.
- DeclarationNameInfo NameInfo(D->getDeclName(), D->getLocation());
- NamedDecl *UD =
- SemaRef.BuildUsingDeclaration(/*Scope*/ nullptr, D->getAccess(),
- D->getUsingLoc(), SS, NameInfo, nullptr,
- /*instantiation*/ true,
- /*typename*/ true, D->getTypenameLoc());
- if (UD)
- SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);
+ // This declaration cannot appear within a function template signature,
+ // so we can't have a partial argument list for a parameter pack.
+ assert(!RetainExpansion &&
+ "should never need to retain an expansion for UsingPackDecl");
- return UD;
-}
+ if (!Expand) {
+ // We cannot fully expand the pack expansion now, so substitute into the
+ // pattern and create a new pack expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+ return instantiateUnresolvedUsingDecl(D, true);
+ }
+
+ // Within a function, we don't have any normal way to check for conflicts
+ // between shadow declarations from different using declarations in the
+ // same pack expansion, but this is always ill-formed because all expansions
+ // must produce (conflicting) enumerators.
+ //
+ // Sadly we can't just reject this in the template definition because it
+ // could be valid if the pack is empty or has exactly one expansion.
+ if (D->getDeclContext()->isFunctionOrMethod() && *NumExpansions > 1) {
+ SemaRef.Diag(D->getEllipsisLoc(),
+ diag::err_using_decl_redeclaration_expansion);
+ return nullptr;
+ }
+
+ // Instantiate the slices of this pack and build a UsingPackDecl.
+ SmallVector<NamedDecl*, 8> Expansions;
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ Decl *Slice = instantiateUnresolvedUsingDecl(D, true);
+ if (!Slice)
+ return nullptr;
+ // Note that we can still get unresolved using declarations here, if we
+ // had arguments for all packs but the pattern also contained other
+ // template arguments (this only happens during partial substitution, e.g.
+ // into the body of a generic lambda in a function template).
+ Expansions.push_back(cast<NamedDecl>(Slice));
+ }
+
+ auto *NewD = SemaRef.BuildUsingPackDecl(D, Expansions);
+ if (isDeclWithinFunction(D))
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewD);
+ return NewD;
+ }
+
+ UnresolvedUsingTypenameDecl *TD = dyn_cast<UnresolvedUsingTypenameDecl>(D);
+ SourceLocation TypenameLoc = TD ? TD->getTypenameLoc() : SourceLocation();
-Decl * TemplateDeclInstantiator
- ::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
NestedNameSpecifierLoc QualifierLoc
- = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(), TemplateArgs);
+ = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
+ TemplateArgs);
if (!QualifierLoc)
return nullptr;
@@ -2500,17 +2574,48 @@ Decl * TemplateDeclInstantiator
DeclarationNameInfo NameInfo
= SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
- NamedDecl *UD =
- SemaRef.BuildUsingDeclaration(/*Scope*/ nullptr, D->getAccess(),
- D->getUsingLoc(), SS, NameInfo, nullptr,
- /*instantiation*/ true,
- /*typename*/ false, SourceLocation());
+ // Produce a pack expansion only if we're not instantiating a particular
+ // slice of a pack expansion.
+ bool InstantiatingSlice = D->getEllipsisLoc().isValid() &&
+ SemaRef.ArgumentPackSubstitutionIndex != -1;
+ SourceLocation EllipsisLoc =
+ InstantiatingSlice ? SourceLocation() : D->getEllipsisLoc();
+
+ NamedDecl *UD = SemaRef.BuildUsingDeclaration(
+ /*Scope*/ nullptr, D->getAccess(), D->getUsingLoc(),
+ /*HasTypename*/ TD, TypenameLoc, SS, NameInfo, EllipsisLoc, nullptr,
+ /*IsInstantiation*/ true);
if (UD)
- SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);
+ SemaRef.Context.setInstantiatedFromUsingDecl(UD, D);
return UD;
}
+Decl *TemplateDeclInstantiator::VisitUnresolvedUsingTypenameDecl(
+ UnresolvedUsingTypenameDecl *D) {
+ return instantiateUnresolvedUsingDecl(D);
+}
+
+Decl *TemplateDeclInstantiator::VisitUnresolvedUsingValueDecl(
+ UnresolvedUsingValueDecl *D) {
+ return instantiateUnresolvedUsingDecl(D);
+}
+
+Decl *TemplateDeclInstantiator::VisitUsingPackDecl(UsingPackDecl *D) {
+ SmallVector<NamedDecl*, 8> Expansions;
+ for (auto *UD : D->expansions()) {
+ if (auto *NewUD =
+ SemaRef.FindInstantiatedDecl(D->getLocation(), UD, TemplateArgs))
+ Expansions.push_back(cast<NamedDecl>(NewUD));
+ else
+ return nullptr;
+ }
+
+ auto *NewD = SemaRef.BuildUsingPackDecl(D, Expansions);
+ if (isDeclWithinFunction(D))
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, NewD);
+ return NewD;
+}
Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *Decl) {
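The instantiateUnresolvedUsingDecl/UsingPackDecl machinery above implements pack expansion of using-declarations (C++1z, P0195R2); a minimal sketch of the source pattern it instantiates:

    template <typename ...Ts> struct Overloaded : Ts... {
      using Ts::operator()...;   // expands to one using-declaration per base;
                                 // the slices are collected in a UsingPackDecl
    };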
@@ -2922,10 +3027,14 @@ TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
if (Invalid)
return nullptr;
+ // Note: we substitute into associated constraints later
+ Expr *const UninstantiatedRequiresClause = L->getRequiresClause();
+
TemplateParameterList *InstL
= TemplateParameterList::Create(SemaRef.Context, L->getTemplateLoc(),
L->getLAngleLoc(), Params,
- L->getRAngleLoc());
+ L->getRAngleLoc(),
+ UninstantiatedRequiresClause);
return InstL;
}
@@ -2977,6 +3086,12 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
Converted))
return nullptr;
+ // Check these arguments are valid for a template partial specialization.
+ if (SemaRef.CheckTemplatePartialSpecializationArgs(
+ PartialSpec->getLocation(), ClassTemplate, InstTemplateArgs.size(),
+ Converted))
+ return nullptr;
+
// Figure out where to insert this class template partial specialization
// in the member template's set of class template partial specializations.
void *InsertPos = nullptr;
@@ -3047,6 +3162,9 @@ TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
InstPartialSpec->setInstantiatedFromMember(PartialSpec);
InstPartialSpec->setTypeAsWritten(WrittenTy);
+ // Check the completed partial specialization.
+ SemaRef.CheckTemplatePartialSpecialization(InstPartialSpec);
+
// Add this partial specialization to the set of class template partial
// specializations.
ClassTemplate->AddPartialSpecialization(InstPartialSpec,
@@ -3099,6 +3217,12 @@ TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
InstTemplateArgs, false, Converted))
return nullptr;
+ // Check these arguments are valid for a template partial specialization.
+ if (SemaRef.CheckTemplatePartialSpecializationArgs(
+ PartialSpec->getLocation(), VarTemplate, InstTemplateArgs.size(),
+ Converted))
+ return nullptr;
+
// Figure out where to insert this variable template partial specialization
// in the member template's set of variable template partial specializations.
void *InsertPos = nullptr;
@@ -3173,6 +3297,9 @@ TemplateDeclInstantiator::InstantiateVarTemplatePartialSpecialization(
InstPartialSpec->setInstantiatedFromMember(PartialSpec);
InstPartialSpec->setTypeAsWritten(WrittenTy);
+ // Check the completed partial specialization.
+ SemaRef.CheckTemplatePartialSpecialization(InstPartialSpec);
+
// Add this partial specialization to the set of variable template partial
// specializations. The instantiation of the initializer is not necessary.
VarTemplate->AddPartialSpecialization(InstPartialSpec, /*InsertPos=*/nullptr);
@@ -3516,7 +3643,8 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// Never instantiate an explicit specialization except if it is a class scope
// explicit specialization.
- if (Function->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
+ TemplateSpecializationKind TSK = Function->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitSpecialization &&
!Function->getClassScopeSpecializationPattern())
return;
@@ -3524,13 +3652,40 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern();
assert(PatternDecl && "instantiating a non-template");
- Stmt *Pattern = PatternDecl->getBody(PatternDecl);
- assert(PatternDecl && "template definition is not a template");
- if (!Pattern) {
- // Try to find a defaulted definition
- PatternDecl->isDefined(PatternDecl);
+ const FunctionDecl *PatternDef = PatternDecl->getDefinition();
+ Stmt *Pattern = nullptr;
+ if (PatternDef) {
+ Pattern = PatternDef->getBody(PatternDef);
+ PatternDecl = PatternDef;
+ }
+
+ // FIXME: We need to track the instantiation stack in order to know which
+ // definitions should be visible within this instantiation.
+ if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Function,
+ Function->getInstantiatedFromMemberFunction(),
+ PatternDecl, PatternDef, TSK,
+ /*Complain*/DefinitionRequired)) {
+ if (DefinitionRequired)
+ Function->setInvalidDecl();
+ else if (TSK == TSK_ExplicitInstantiationDefinition) {
+ // Try again at the end of the translation unit (at which point a
+ // definition will be required).
+ assert(!Recursive);
+ PendingInstantiations.push_back(
+ std::make_pair(Function, PointOfInstantiation));
+ } else if (TSK == TSK_ImplicitInstantiation) {
+ if (AtEndOfTU && !getDiagnostics().hasErrorOccurred()) {
+ Diag(PointOfInstantiation, diag::warn_func_template_missing)
+ << Function;
+ Diag(PatternDecl->getLocation(), diag::note_forward_template_decl);
+ if (getLangOpts().CPlusPlus11)
+ Diag(PointOfInstantiation, diag::note_inst_declaration_hint)
+ << Function;
+ }
+ }
+
+ return;
}
- assert(PatternDecl && "template definition is not a template");
// Postpone late parsed template instantiations.
if (PatternDecl->isLateTemplateParsed() &&
@@ -3558,58 +3713,23 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (PatternDecl->isFromASTFile())
ExternalSource->ReadLateParsedTemplates(LateParsedTemplateMap);
- LateParsedTemplate *LPT = LateParsedTemplateMap.lookup(PatternDecl);
- assert(LPT && "missing LateParsedTemplate");
- LateTemplateParser(OpaqueParser, *LPT);
+ auto LPTIter = LateParsedTemplateMap.find(PatternDecl);
+ assert(LPTIter != LateParsedTemplateMap.end() &&
+ "missing LateParsedTemplate");
+ LateTemplateParser(OpaqueParser, *LPTIter->second);
Pattern = PatternDecl->getBody(PatternDecl);
}
- // FIXME: Check that the definition is visible before trying to instantiate
- // it. This requires us to track the instantiation stack in order to know
- // which definitions should be visible.
-
- if (!Pattern && !PatternDecl->isDefaulted()) {
- if (DefinitionRequired) {
- if (Function->getPrimaryTemplate())
- Diag(PointOfInstantiation,
- diag::err_explicit_instantiation_undefined_func_template)
- << Function->getPrimaryTemplate();
- else
- Diag(PointOfInstantiation,
- diag::err_explicit_instantiation_undefined_member)
- << 1 << Function->getDeclName() << Function->getDeclContext();
-
- if (PatternDecl)
- Diag(PatternDecl->getLocation(),
- diag::note_explicit_instantiation_here);
- Function->setInvalidDecl();
- } else if (Function->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDefinition) {
- assert(!Recursive);
- PendingInstantiations.push_back(
- std::make_pair(Function, PointOfInstantiation));
- } else if (Function->getTemplateSpecializationKind()
- == TSK_ImplicitInstantiation) {
- if (AtEndOfTU && !getDiagnostics().hasErrorOccurred()) {
- Diag(PointOfInstantiation, diag::warn_func_template_missing)
- << Function;
- Diag(PatternDecl->getLocation(), diag::note_forward_template_decl);
- if (getLangOpts().CPlusPlus11)
- Diag(PointOfInstantiation, diag::note_inst_declaration_hint)
- << Function;
- }
- }
-
- return;
- }
+ // Note, we should never try to instantiate a deleted function template.
+ assert((Pattern || PatternDecl->isDefaulted()) &&
+ "unexpected kind of function template definition");
// C++1y [temp.explicit]p10:
// Except for inline functions, declarations with types deduced from their
// initializer or return value, and class template specializations, other
// explicit instantiation declarations have the effect of suppressing the
// implicit instantiation of the entity to which they refer.
- if (Function->getTemplateSpecializationKind() ==
- TSK_ExplicitInstantiationDeclaration &&
+ if (TSK == TSK_ExplicitInstantiationDeclaration &&
!PatternDecl->isInlined() &&
!PatternDecl->getReturnType()->getContainedAutoType())
return;
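For context, a hedged sketch of the [temp.explicit]p10 rule the rewritten condition enforces (inline functions and deduced return types are exempt from the suppression):

    template <typename T> T twice(T v) { return v + v; }
    extern template int twice<int>(int);  // explicit instantiation declaration:
                                          // suppresses implicit instantiation
                                          // of the definition in this TU
    int use(int x) { return twice(x); }   // still callable; a definition must
                                          // be explicitly instantiated elsewhere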
@@ -3631,6 +3751,10 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
PrettyDeclStackTraceEntry CrashInfo(*this, Function, SourceLocation(),
"instantiating function definition");
+ // The instantiation is visible here, even if it was first declared in an
+ // unimported module.
+ Function->setHidden(false);
+
// Copy the inner loc start from the pattern.
Function->setInnerLocStart(PatternDecl->getInnerLocStart());
@@ -4035,6 +4159,10 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
PrettyDeclStackTraceEntry CrashInfo(*this, Var, SourceLocation(),
"instantiating variable initializer");
+ // The instantiation is visible here, even if it was first declared in an
+ // unimported module.
+ Var->setHidden(false);
+
// If we're performing recursive template instantiation, create our own
// queue of pending implicit instantiations that we will instantiate
// later, while we're still within our own instantiation context.
@@ -4083,33 +4211,17 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
Def = PatternDecl->getDefinition();
}
- // FIXME: Check that the definition is visible before trying to instantiate
- // it. This requires us to track the instantiation stack in order to know
- // which definitions should be visible.
+ TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
// If we don't have a definition of the variable template, we won't perform
// any instantiation. Rather, we rely on the user to instantiate this
// definition (or provide a specialization for it) in another translation
// unit.
- if (!Def) {
- if (DefinitionRequired) {
- if (VarSpec)
- Diag(PointOfInstantiation,
- diag::err_explicit_instantiation_undefined_var_template) << Var;
- else
- Diag(PointOfInstantiation,
- diag::err_explicit_instantiation_undefined_member)
- << 2 << Var->getDeclName() << Var->getDeclContext();
- Diag(PatternDecl->getLocation(),
- diag::note_explicit_instantiation_here);
- if (VarSpec)
- Var->setInvalidDecl();
- } else if (Var->getTemplateSpecializationKind()
- == TSK_ExplicitInstantiationDefinition) {
+ if (!Def && !DefinitionRequired) {
+ if (TSK == TSK_ExplicitInstantiationDefinition) {
PendingInstantiations.push_back(
std::make_pair(Var, PointOfInstantiation));
- } else if (Var->getTemplateSpecializationKind()
- == TSK_ImplicitInstantiation) {
+ } else if (TSK == TSK_ImplicitInstantiation) {
// Warn about missing definition at the end of translation unit.
if (AtEndOfTU && !getDiagnostics().hasErrorOccurred()) {
Diag(PointOfInstantiation, diag::warn_var_template_missing)
@@ -4118,12 +4230,20 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
if (getLangOpts().CPlusPlus11)
Diag(PointOfInstantiation, diag::note_inst_declaration_hint) << Var;
}
+ return;
}
- return;
}
- TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+ // FIXME: We need to track the instantiation stack in order to know which
+ // definitions should be visible within this instantiation.
+ // FIXME: Produce diagnostics when Var->getInstantiatedFromStaticDataMember().
+ if (DiagnoseUninstantiableTemplate(PointOfInstantiation, Var,
+ /*InstantiatedFromMember*/false,
+ PatternDecl, Def, TSK,
+ /*Complain*/DefinitionRequired))
+ return;
+
// Never instantiate an explicit specialization.
if (TSK == TSK_ExplicitSpecialization)
@@ -4483,22 +4603,36 @@ static bool isInstantiationOf(UsingShadowDecl *Pattern,
Pattern);
}
-static bool isInstantiationOf(UsingDecl *Pattern,
- UsingDecl *Instance,
+static bool isInstantiationOf(UsingDecl *Pattern, UsingDecl *Instance,
ASTContext &C) {
return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
}
-static bool isInstantiationOf(UnresolvedUsingValueDecl *Pattern,
- UsingDecl *Instance,
- ASTContext &C) {
- return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
-}
-
-static bool isInstantiationOf(UnresolvedUsingTypenameDecl *Pattern,
- UsingDecl *Instance,
- ASTContext &C) {
- return declaresSameEntity(C.getInstantiatedFromUsingDecl(Instance), Pattern);
+template<typename T>
+static bool isInstantiationOfUnresolvedUsingDecl(T *Pattern, Decl *Other,
+ ASTContext &Ctx) {
+ // An unresolved using declaration can instantiate to an unresolved using
+ // declaration, or to a using declaration or a using declaration pack.
+ //
+ // Multiple declarations can claim to be instantiated from an unresolved
+ // using declaration if it's a pack expansion. We want the UsingPackDecl
+ // in that case, not the individual UsingDecls within the pack.
+ bool OtherIsPackExpansion;
+ NamedDecl *OtherFrom;
+ if (auto *OtherUUD = dyn_cast<T>(Other)) {
+ OtherIsPackExpansion = OtherUUD->isPackExpansion();
+ OtherFrom = Ctx.getInstantiatedFromUsingDecl(OtherUUD);
+ } else if (auto *OtherUPD = dyn_cast<UsingPackDecl>(Other)) {
+ OtherIsPackExpansion = true;
+ OtherFrom = OtherUPD->getInstantiatedFromUsingDecl();
+ } else if (auto *OtherUD = dyn_cast<UsingDecl>(Other)) {
+ OtherIsPackExpansion = false;
+ OtherFrom = Ctx.getInstantiatedFromUsingDecl(OtherUD);
+ } else {
+ return false;
+ }
+ return Pattern->isPackExpansion() == OtherIsPackExpansion &&
+ declaresSameEntity(OtherFrom, Pattern);
}
static bool isInstantiationOfStaticDataMember(VarDecl *Pattern,
@@ -4519,49 +4653,40 @@ static bool isInstantiationOfStaticDataMember(VarDecl *Pattern,
// Other is the prospective instantiation
// D is the prospective pattern
static bool isInstantiationOf(ASTContext &Ctx, NamedDecl *D, Decl *Other) {
- if (D->getKind() != Other->getKind()) {
- if (UnresolvedUsingTypenameDecl *UUD
- = dyn_cast<UnresolvedUsingTypenameDecl>(D)) {
- if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) {
- return isInstantiationOf(UUD, UD, Ctx);
- }
- }
+ if (auto *UUD = dyn_cast<UnresolvedUsingTypenameDecl>(D))
+ return isInstantiationOfUnresolvedUsingDecl(UUD, Other, Ctx);
- if (UnresolvedUsingValueDecl *UUD
- = dyn_cast<UnresolvedUsingValueDecl>(D)) {
- if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) {
- return isInstantiationOf(UUD, UD, Ctx);
- }
- }
+ if (auto *UUD = dyn_cast<UnresolvedUsingValueDecl>(D))
+ return isInstantiationOfUnresolvedUsingDecl(UUD, Other, Ctx);
+ if (D->getKind() != Other->getKind())
return false;
- }
- if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Other))
+ if (auto *Record = dyn_cast<CXXRecordDecl>(Other))
return isInstantiationOf(cast<CXXRecordDecl>(D), Record);
- if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Other))
+ if (auto *Function = dyn_cast<FunctionDecl>(Other))
return isInstantiationOf(cast<FunctionDecl>(D), Function);
- if (EnumDecl *Enum = dyn_cast<EnumDecl>(Other))
+ if (auto *Enum = dyn_cast<EnumDecl>(Other))
return isInstantiationOf(cast<EnumDecl>(D), Enum);
- if (VarDecl *Var = dyn_cast<VarDecl>(Other))
+ if (auto *Var = dyn_cast<VarDecl>(Other))
if (Var->isStaticDataMember())
return isInstantiationOfStaticDataMember(cast<VarDecl>(D), Var);
- if (ClassTemplateDecl *Temp = dyn_cast<ClassTemplateDecl>(Other))
+ if (auto *Temp = dyn_cast<ClassTemplateDecl>(Other))
return isInstantiationOf(cast<ClassTemplateDecl>(D), Temp);
- if (FunctionTemplateDecl *Temp = dyn_cast<FunctionTemplateDecl>(Other))
+ if (auto *Temp = dyn_cast<FunctionTemplateDecl>(Other))
return isInstantiationOf(cast<FunctionTemplateDecl>(D), Temp);
- if (ClassTemplatePartialSpecializationDecl *PartialSpec
- = dyn_cast<ClassTemplatePartialSpecializationDecl>(Other))
+ if (auto *PartialSpec =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(Other))
return isInstantiationOf(cast<ClassTemplatePartialSpecializationDecl>(D),
PartialSpec);
- if (FieldDecl *Field = dyn_cast<FieldDecl>(Other)) {
+ if (auto *Field = dyn_cast<FieldDecl>(Other)) {
if (!Field->getDeclName()) {
// This is an unnamed field.
return declaresSameEntity(Ctx.getInstantiatedFromUnnamedFieldDecl(Field),
@@ -4569,14 +4694,14 @@ static bool isInstantiationOf(ASTContext &Ctx, NamedDecl *D, Decl *Other) {
}
}
- if (UsingDecl *Using = dyn_cast<UsingDecl>(Other))
+ if (auto *Using = dyn_cast<UsingDecl>(Other))
return isInstantiationOf(cast<UsingDecl>(D), Using, Ctx);
- if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(Other))
+ if (auto *Shadow = dyn_cast<UsingShadowDecl>(Other))
return isInstantiationOf(cast<UsingShadowDecl>(D), Shadow, Ctx);
- return D->getDeclName() && isa<NamedDecl>(Other) &&
- D->getDeclName() == cast<NamedDecl>(Other)->getDeclName();
+ return D->getDeclName() &&
+ D->getDeclName() == cast<NamedDecl>(Other)->getDeclName();
}
template<typename ForwardIterator>
@@ -4812,6 +4937,8 @@ NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
}
NamedDecl *Result = nullptr;
+ // FIXME: If the name is a dependent name, this lookup won't necessarily
+ // find it. Does that ever matter?
if (D->getDeclName()) {
DeclContext::lookup_result Found = ParentDC->lookup(D->getDeclName());
Result = findInstantiationOf(Context, D, Found.begin(), Found.end());
diff --git a/lib/Sema/SemaTemplateVariadic.cpp b/lib/Sema/SemaTemplateVariadic.cpp
index 06afe87f515e..54556b505ee0 100644
--- a/lib/Sema/SemaTemplateVariadic.cpp
+++ b/lib/Sema/SemaTemplateVariadic.cpp
@@ -390,21 +390,18 @@ void Sema::collectUnexpandedParameterPacks(QualType T,
void Sema::collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(TL);
-}
+}
-void Sema::collectUnexpandedParameterPacks(CXXScopeSpec &SS,
- SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
- NestedNameSpecifier *Qualifier = SS.getScopeRep();
- if (!Qualifier)
- return;
-
- NestedNameSpecifierLoc QualifierLoc(Qualifier, SS.location_data());
+void Sema::collectUnexpandedParameterPacks(
+ NestedNameSpecifierLoc NNS,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
CollectUnexpandedParameterPacksVisitor(Unexpanded)
- .TraverseNestedNameSpecifierLoc(QualifierLoc);
+ .TraverseNestedNameSpecifierLoc(NNS);
}
-void Sema::collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
- SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+void Sema::collectUnexpandedParameterPacks(
+ const DeclarationNameInfo &NameInfo,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
CollectUnexpandedParameterPacksVisitor(Unexpanded)
.TraverseDeclarationNameInfo(NameInfo);
}
@@ -639,7 +636,7 @@ bool Sema::CheckParameterPacksForExpansion(
return true;
}
}
-
+
return false;
}
@@ -772,7 +769,7 @@ bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
}
if (Chunk.Fun.getExceptionSpecType() == EST_Dynamic) {
- for (unsigned i = 0; i != Chunk.Fun.NumExceptions; ++i) {
+ for (unsigned i = 0; i != Chunk.Fun.getNumExceptions(); ++i) {
if (Chunk.Fun.Exceptions[i]
.Ty.get()
->containsUnexpandedParameterPack())
@@ -936,12 +933,71 @@ Sema::getTemplateArgumentPackExpansionPattern(
llvm_unreachable("Invalid TemplateArgument Kind!");
}
+Optional<unsigned> Sema::getFullyPackExpandedSize(TemplateArgument Arg) {
+ assert(Arg.containsUnexpandedParameterPack());
+
+ // If this is a substituted pack, grab that pack. If not, we don't know
+ // the size yet.
+ // FIXME: We could find a size in more cases by looking for a substituted
+ // pack anywhere within this argument, but that's not necessary in the common
+ // case for 'sizeof...(A)' handling.
+ TemplateArgument Pack;
+ switch (Arg.getKind()) {
+ case TemplateArgument::Type:
+ if (auto *Subst = Arg.getAsType()->getAs<SubstTemplateTypeParmPackType>())
+ Pack = Subst->getArgumentPack();
+ else
+ return None;
+ break;
+
+ case TemplateArgument::Expression:
+ if (auto *Subst =
+ dyn_cast<SubstNonTypeTemplateParmPackExpr>(Arg.getAsExpr()))
+ Pack = Subst->getArgumentPack();
+ else if (auto *Subst = dyn_cast<FunctionParmPackExpr>(Arg.getAsExpr())) {
+ for (ParmVarDecl *PD : *Subst)
+ if (PD->isParameterPack())
+ return None;
+ return Subst->getNumExpansions();
+ } else
+ return None;
+ break;
+
+ case TemplateArgument::Template:
+ if (SubstTemplateTemplateParmPackStorage *Subst =
+ Arg.getAsTemplate().getAsSubstTemplateTemplateParmPack())
+ Pack = Subst->getArgumentPack();
+ else
+ return None;
+ break;
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::NullPtr:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return None;
+ }
+
+ // Check that no argument in the pack is itself a pack expansion.
+ for (TemplateArgument Elem : Pack.pack_elements()) {
+ // There's no point recursing in this case; we would have already
+ // expanded this pack expansion into the enclosing pack if we could.
+ if (Elem.isPackExpansion())
+ return None;
+ }
+ return Pack.pack_size();
+}
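getFullyPackExpandedSize lets Sema answer sizeof...(A) directly from an already-substituted pack instead of materializing a pack expansion; the common case it serves, as a sketch:

    template <typename ...Ts> constexpr unsigned arity() { return sizeof...(Ts); }
    static_assert(arity<int, float, char>() == 3, "");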
+
static void CheckFoldOperand(Sema &S, Expr *E) {
if (!E)
return;
E = E->IgnoreImpCasts();
- if (isa<BinaryOperator>(E) || isa<AbstractConditionalOperator>(E)) {
+ auto *OCE = dyn_cast<CXXOperatorCallExpr>(E);
+ if ((OCE && OCE->isInfixBinaryOp()) || isa<BinaryOperator>(E) ||
+ isa<AbstractConditionalOperator>(E)) {
S.Diag(E->getExprLoc(), diag::err_fold_expression_bad_operand)
<< E->getSourceRange()
<< FixItHint::CreateInsertion(E->getLocStart(), "(")
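The added CXXOperatorCallExpr check makes overloaded infix operators hit the same diagnostic as built-in ones; a sketch of what CheckFoldOperand accepts and rejects:

    template <typename ...Ts> auto sum(Ts ...ts) {
      // return (ts + ... + 1 + 2);   // error: fold operand is itself a binary
      //                              // expression; fix-it inserts parentheses
      return (ts + ... + (1 + 2));    // OK: the operand is parenthesized
    }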
diff --git a/lib/Sema/SemaType.cpp b/lib/Sema/SemaType.cpp
index f3747eaa5cb5..ae9a3ee790e1 100644
--- a/lib/Sema/SemaType.cpp
+++ b/lib/Sema/SemaType.cpp
@@ -106,6 +106,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
case AttributeList::AT_FastCall: \
case AttributeList::AT_StdCall: \
case AttributeList::AT_ThisCall: \
+ case AttributeList::AT_RegCall: \
case AttributeList::AT_Pascal: \
case AttributeList::AT_SwiftCall: \
case AttributeList::AT_VectorCall: \
@@ -717,6 +718,7 @@ static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
/*NumExceptions=*/0,
/*NoexceptExpr=*/nullptr,
/*ExceptionSpecTokens=*/nullptr,
+ /*DeclsInPrototype=*/None,
loc, loc, declarator));
// For consistency, make sure the state still has us as processing
@@ -1000,55 +1002,27 @@ static QualType applyObjCTypeArgs(Sema &S, SourceLocation loc, QualType type,
return S.Context.getObjCObjectType(type, finalTypeArgs, { }, false);
}
-/// Apply Objective-C protocol qualifiers to the given type.
-static QualType applyObjCProtocolQualifiers(
- Sema &S, SourceLocation loc, SourceRange range, QualType type,
- ArrayRef<ObjCProtocolDecl *> protocols,
- const SourceLocation *protocolLocs,
- bool failOnError = false) {
- ASTContext &ctx = S.Context;
- if (const ObjCObjectType *objT = dyn_cast<ObjCObjectType>(type.getTypePtr())){
- // FIXME: Check for protocols to which the class type is already
- // known to conform.
-
- return ctx.getObjCObjectType(objT->getBaseType(),
- objT->getTypeArgsAsWritten(),
- protocols,
- objT->isKindOfTypeAsWritten());
- }
-
- if (type->isObjCObjectType()) {
- // Silently overwrite any existing protocol qualifiers.
- // TODO: determine whether that's the right thing to do.
-
- // FIXME: Check for protocols to which the class type is already
- // known to conform.
- return ctx.getObjCObjectType(type, { }, protocols, false);
- }
-
- // id<protocol-list>
- if (type->isObjCIdType()) {
- const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
- type = ctx.getObjCObjectType(ctx.ObjCBuiltinIdTy, { }, protocols,
- objPtr->isKindOfType());
- return ctx.getObjCObjectPointerType(type);
- }
-
- // Class<protocol-list>
- if (type->isObjCClassType()) {
- const ObjCObjectPointerType *objPtr = type->castAs<ObjCObjectPointerType>();
- type = ctx.getObjCObjectType(ctx.ObjCBuiltinClassTy, { }, protocols,
- objPtr->isKindOfType());
- return ctx.getObjCObjectPointerType(type);
+QualType Sema::BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
+ SourceLocation ProtocolLAngleLoc,
+ ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc,
+ bool FailOnError) {
+ QualType Result = QualType(Decl->getTypeForDecl(), 0);
+ if (!Protocols.empty()) {
+ bool HasError;
+ Result = Context.applyObjCProtocolQualifiers(Result, Protocols,
+ HasError);
+ if (HasError) {
+ Diag(SourceLocation(), diag::err_invalid_protocol_qualifiers)
+ << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
+ if (FailOnError) Result = QualType();
+ }
+ if (FailOnError && Result.isNull())
+ return QualType();
}
- S.Diag(loc, diag::err_invalid_protocol_qualifiers)
- << range;
-
- if (failOnError)
- return QualType();
-
- return type;
+ return Result;
}
QualType Sema::BuildObjCObjectType(QualType BaseType,
@@ -1072,12 +1046,14 @@ QualType Sema::BuildObjCObjectType(QualType BaseType,
}
if (!Protocols.empty()) {
- Result = applyObjCProtocolQualifiers(*this, Loc,
- SourceRange(ProtocolLAngleLoc,
- ProtocolRAngleLoc),
- Result, Protocols,
- ProtocolLocs.data(),
- FailOnError);
+ bool HasError;
+ Result = Context.applyObjCProtocolQualifiers(Result, Protocols,
+ HasError);
+ if (HasError) {
+ Diag(Loc, diag::err_invalid_protocol_qualifiers)
+ << SourceRange(ProtocolLAngleLoc, ProtocolRAngleLoc);
+ if (FailOnError) Result = QualType();
+ }
if (FailOnError && Result.isNull())
return QualType();
}
@@ -1153,7 +1129,7 @@ TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
ActualTypeArgInfos.clear();
break;
}
-
+
assert(TypeArgInfo && "No type source info?");
ActualTypeArgInfos.push_back(TypeArgInfo);
}
@@ -1170,7 +1146,7 @@ TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
if (Result == T)
return BaseType;
-
+
// Create source information for this type.
TypeSourceInfo *ResultTInfo = Context.CreateTypeSourceInfo(Result);
TypeLoc ResultTL = ResultTInfo->getTypeLoc();
@@ -1183,6 +1159,20 @@ TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
ResultTL = ObjCObjectPointerTL.getPointeeLoc();
}
+ if (auto OTPTL = ResultTL.getAs<ObjCTypeParamTypeLoc>()) {
+ // Protocol qualifier information.
+ if (OTPTL.getNumProtocols() > 0) {
+ assert(OTPTL.getNumProtocols() == Protocols.size());
+ OTPTL.setProtocolLAngleLoc(ProtocolLAngleLoc);
+ OTPTL.setProtocolRAngleLoc(ProtocolRAngleLoc);
+ for (unsigned i = 0, n = Protocols.size(); i != n; ++i)
+ OTPTL.setProtocolLoc(i, ProtocolLocs[i]);
+ }
+
+ // We're done. Return the completed type to the parser.
+ return CreateParsedType(Result, ResultTInfo);
+ }
+
auto ObjCObjectTL = ResultTL.castAs<ObjCObjectTypeLoc>();
// Type argument information.
@@ -1220,19 +1210,19 @@ TypeResult Sema::actOnObjCTypeArgsAndProtocolQualifiers(
return CreateParsedType(Result, ResultTInfo);
}
-static StringRef getImageAccessAttrStr(AttributeList *attrs) {
- if (attrs) {
-
- AttributeList *Next;
+static OpenCLAccessAttr::Spelling getImageAccess(const AttributeList *Attrs) {
+ if (Attrs) {
+ const AttributeList *Next = Attrs;
do {
- AttributeList &Attr = *attrs;
+ const AttributeList &Attr = *Next;
Next = Attr.getNext();
if (Attr.getKind() == AttributeList::AT_OpenCLAccess) {
- return Attr.getName()->getName();
+ return static_cast<OpenCLAccessAttr::Spelling>(
+ Attr.getSemanticSpelling());
}
} while (Next);
}
- return "";
+ return OpenCLAccessAttr::Keyword_read_only;
}
/// \brief Convert the specified declspec to the appropriate type
@@ -1411,14 +1401,6 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = Context.LongDoubleTy;
else
Result = Context.DoubleTy;
-
- if (S.getLangOpts().OpenCL &&
- !((S.getLangOpts().OpenCLVersion >= 120) ||
- S.getOpenCLOptions().cl_khr_fp64)) {
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
- << Result << "cl_khr_fp64";
- declarator.setInvalidType(true);
- }
break;
case DeclSpec::TST_float128:
if (!S.Context.getTargetInfo().hasFloat128Type())
@@ -1470,48 +1452,6 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
Result = S.GetTypeFromParser(DS.getRepAsType());
if (Result.isNull()) {
declarator.setInvalidType(true);
- } else if (S.getLangOpts().OpenCL) {
- if (Result->getAs<AtomicType>()) {
- StringRef TypeName = Result.getBaseTypeIdentifier()->getName();
- bool NoExtTypes =
- llvm::StringSwitch<bool>(TypeName)
- .Cases("atomic_int", "atomic_uint", "atomic_float",
- "atomic_flag", true)
- .Default(false);
- if (!S.getOpenCLOptions().cl_khr_int64_base_atomics && !NoExtTypes) {
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
- << Result << "cl_khr_int64_base_atomics";
- declarator.setInvalidType(true);
- }
- if (!S.getOpenCLOptions().cl_khr_int64_extended_atomics &&
- !NoExtTypes) {
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
- << Result << "cl_khr_int64_extended_atomics";
- declarator.setInvalidType(true);
- }
- if (!S.getOpenCLOptions().cl_khr_fp64 &&
- !TypeName.compare("atomic_double")) {
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
- << Result << "cl_khr_fp64";
- declarator.setInvalidType(true);
- }
- } else if (!S.getOpenCLOptions().cl_khr_gl_msaa_sharing &&
- (Result->isOCLImage2dArrayMSAADepthROType() ||
- Result->isOCLImage2dArrayMSAADepthWOType() ||
- Result->isOCLImage2dArrayMSAADepthRWType() ||
- Result->isOCLImage2dArrayMSAAROType() ||
- Result->isOCLImage2dArrayMSAARWType() ||
- Result->isOCLImage2dArrayMSAAWOType() ||
- Result->isOCLImage2dMSAADepthROType() ||
- Result->isOCLImage2dMSAADepthRWType() ||
- Result->isOCLImage2dMSAADepthWOType() ||
- Result->isOCLImage2dMSAAROType() ||
- Result->isOCLImage2dMSAARWType() ||
- Result->isOCLImage2dMSAAWOType())) {
- S.Diag(DS.getTypeSpecTypeLoc(), diag::err_type_requires_extension)
- << Result << "cl_khr_gl_msaa_sharing";
- declarator.setInvalidType(true);
- }
}
// TypeQuals handled by caller.
@@ -1623,11 +1563,14 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
#define GENERIC_IMAGE_TYPE(ImgType, Id) \
case DeclSpec::TST_##ImgType##_t: \
- Result = llvm::StringSwitch<QualType>( \
- getImageAccessAttrStr(DS.getAttributes().getList())) \
- .Cases("write_only", "__write_only", Context.Id##WOTy) \
- .Cases("read_write", "__read_write", Context.Id##RWTy) \
- .Default(Context.Id##ROTy); \
+ switch (getImageAccess(DS.getAttributes().getList())) { \
+ case OpenCLAccessAttr::Keyword_write_only: \
+ Result = Context.Id##WOTy; break; \
+ case OpenCLAccessAttr::Keyword_read_write: \
+ Result = Context.Id##RWTy; break; \
+ case OpenCLAccessAttr::Keyword_read_only: \
+ Result = Context.Id##ROTy; break; \
+ } \
break;
#include "clang/Basic/OpenCLImageTypes.def"
@@ -1637,6 +1580,10 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
}
+ if (S.getLangOpts().OpenCL &&
+ S.checkOpenCLDisabledTypeDeclSpec(DS, Result))
+ declarator.setInvalidType(true);
+
// Handle complex types.
if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
if (S.getLangOpts().Freestanding)
@@ -1748,6 +1695,12 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
if (T.isNull())
return QualType();
+ // Ignore any attempt to form a cv-qualified reference.
+ if (T->isReferenceType()) {
+ Qs.removeConst();
+ Qs.removeVolatile();
+ }
+
// Enforce C99 6.7.3p2: "Types other than pointer types derived from
// object or incomplete types shall not be restrict-qualified."
if (Qs.hasRestrict()) {
@@ -1789,6 +1742,11 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
if (T.isNull())
return QualType();
+ // Ignore any attempt to form a cv-qualified reference.
+ if (T->isReferenceType())
+ CVRAU &=
+ ~(DeclSpec::TQ_const | DeclSpec::TQ_volatile | DeclSpec::TQ_atomic);
+
// Convert from DeclSpec::TQ to Qualifiers::TQ by just dropping TQ_atomic and
// TQ_unaligned;
unsigned CVR = CVRAU & ~(DeclSpec::TQ_atomic | DeclSpec::TQ_unaligned);
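Dropping the qualifiers instead of diagnosing follows C++ [dcl.ref]p1: cv-qualifiers that reach a reference type through a typedef or template parameter are ignored. Sketch:

    typedef int &IntRef;
    int g;
    const IntRef r = g;   // 'const' would apply to the reference itself and is
                          // silently dropped; r is still an int&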
@@ -2030,7 +1988,7 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
return Context.getRValueReferenceType(T);
}
-/// \brief Build a Pipe type.
+/// \brief Build a Read-only Pipe type.
///
/// \param T The type to which we'll be building a Pipe.
///
@@ -2038,11 +1996,20 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
///
/// \returns A suitable pipe type, if there are no errors. Otherwise, returns a
/// NULL type.
-QualType Sema::BuildPipeType(QualType T, SourceLocation Loc) {
- assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");
+QualType Sema::BuildReadPipeType(QualType T, SourceLocation Loc) {
+ return Context.getReadPipeType(T);
+}
- // Build the pipe type.
- return Context.getPipeType(T);
+/// \brief Build a Write-only Pipe type.
+///
+/// \param T The type to which we'll be building a Pipe.
+///
+/// \param Loc Currently unused.
+///
+/// \returns A suitable pipe type, if there are no errors. Otherwise, returns a
+/// NULL type.
+QualType Sema::BuildWritePipeType(QualType T, SourceLocation Loc) {
+ return Context.getWritePipeType(T);
}
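The read/write split mirrors the OpenCL 2.0 access qualifiers; a sketch of the kernel-language (OpenCL C) source that reaches each builder:

    // OpenCL 2.0
    kernel void k(read_only  pipe int rp,   // BuildReadPipeType
                  write_only pipe int wp) { // BuildWritePipeType
    }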
/// Check whether the specified array size makes the array type a VLA. If so,
@@ -2242,6 +2209,10 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Diag(Loc, diag::err_opencl_vla);
return QualType();
}
+ // CUDA device code doesn't support VLAs.
+ if (getLangOpts().CUDA && T->isVariableArrayType())
+ CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget();
+
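A sketch of what the new CUDA check rejects; CUDADiagIfDeviceCode defers the error until the function is actually emitted for the device:

    __device__ void fill(int n) {
      int buf[n];   // error (roughly): cannot use variable-length arrays in
                    // __device__ functions
      (void)buf;
    }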
// If this is not C99, extwarn about VLA's and C99 array size modifiers.
if (!getLangOpts().C99) {
if (T->isVariableArrayType()) {
@@ -2390,28 +2361,16 @@ static void checkExtParameterInfos(Sema &S, ArrayRef<QualType> paramTypes,
}
continue;
- // swift_context parameters must be the last parameter except for
- // a possible swift_error parameter.
case ParameterABI::SwiftContext:
checkForSwiftCC(paramIndex);
- if (!(paramIndex == numParams - 1 ||
- (paramIndex == numParams - 2 &&
- EPI.ExtParameterInfos[numParams - 1].getABI()
- == ParameterABI::SwiftErrorResult))) {
- S.Diag(getParamLoc(paramIndex),
- diag::err_swift_context_not_before_swift_error_result);
- }
continue;
- // swift_error parameters must be the last parameter.
+ // swift_error parameters must be preceded by a swift_context parameter.
case ParameterABI::SwiftErrorResult:
checkForSwiftCC(paramIndex);
- if (paramIndex != numParams - 1) {
- S.Diag(getParamLoc(paramIndex),
- diag::err_swift_error_result_not_last);
- } else if (paramIndex == 0 ||
- EPI.ExtParameterInfos[paramIndex - 1].getABI()
- != ParameterABI::SwiftContext) {
+ if (paramIndex == 0 ||
+ EPI.ExtParameterInfos[paramIndex - 1].getABI() !=
+ ParameterABI::SwiftContext) {
S.Diag(getParamLoc(paramIndex),
diag::err_swift_error_result_not_after_swift_context);
}
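The relaxed rule keeps only adjacency: swift_error_result must immediately follow swift_context, but the pair no longer has to terminate the parameter list. A hedged sketch using the corresponding Clang parameter attributes:

    void callback(void *ctx __attribute__((swift_context)),
                  void **err __attribute__((swift_error_result)),
                  int extra)   // trailing parameters are now permitted
        __attribute__((swiftcall));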
@@ -2855,7 +2814,8 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
Error = 7; // Exception declaration
break;
case Declarator::TemplateParamContext:
- Error = 8; // Template parameter
+ if (!SemaRef.getLangOpts().CPlusPlus1z)
+ Error = 8; // Template parameter
break;
case Declarator::BlockLiteralContext:
Error = 9; // Block literal
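Skipping the error in C++1z reflects placeholder types for non-type template parameters (P0127R2), where 'auto' is valid in a template parameter declaration:

    template <auto V> struct Constant {};  // OK in C++1z; V's type is deduced
    Constant<42>  ci;                      // V is int
    Constant<'x'> cc;                      // V is char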
@@ -3212,6 +3172,7 @@ namespace {
Pointer,
BlockPointer,
MemberPointer,
+ Array,
};
} // end anonymous namespace
@@ -3273,15 +3234,27 @@ namespace {
// NSError**
NSErrorPointerPointer,
};
+
+ /// Describes a declarator chunk wrapping a pointer that marks inference as
+ /// unexpected.
+ // These values must be kept in sync with diagnostics.
+ enum class PointerWrappingDeclaratorKind {
+ /// Pointer is top-level.
+ None = -1,
+ /// Pointer is an array element.
+ Array = 0,
+ /// Pointer is the referent type of a C++ reference.
+ Reference = 1
+ };
} // end anonymous namespace
/// Classify the given declarator, whose specified type is \c type, based on
/// what kind of pointer it refers to.
///
/// This is used to determine the default nullability.
-static PointerDeclaratorKind classifyPointerDeclarator(Sema &S,
- QualType type,
- Declarator &declarator) {
+static PointerDeclaratorKind
+classifyPointerDeclarator(Sema &S, QualType type, Declarator &declarator,
+ PointerWrappingDeclaratorKind &wrappingKind) {
unsigned numNormalPointers = 0;
// For any dependent type, we consider it a non-pointer.
@@ -3293,6 +3266,10 @@ static PointerDeclaratorKind classifyPointerDeclarator(Sema &S,
DeclaratorChunk &chunk = declarator.getTypeObject(i);
switch (chunk.Kind) {
case DeclaratorChunk::Array:
+ if (numNormalPointers == 0)
+ wrappingKind = PointerWrappingDeclaratorKind::Array;
+ break;
+
case DeclaratorChunk::Function:
case DeclaratorChunk::Pipe:
break;
@@ -3303,14 +3280,18 @@ static PointerDeclaratorKind classifyPointerDeclarator(Sema &S,
: PointerDeclaratorKind::SingleLevelPointer;
case DeclaratorChunk::Paren:
+ break;
+
case DeclaratorChunk::Reference:
- continue;
+ if (numNormalPointers == 0)
+ wrappingKind = PointerWrappingDeclaratorKind::Reference;
+ break;
case DeclaratorChunk::Pointer:
++numNormalPointers;
if (numNormalPointers > 2)
return PointerDeclaratorKind::MultiLevelPointer;
- continue;
+ break;
}
}
@@ -3453,12 +3434,77 @@ static FileID getNullabilityCompletenessCheckFileID(Sema &S,
return file;
}
-/// Check for consistent use of nullability.
-static void checkNullabilityConsistency(TypeProcessingState &state,
+/// Creates a fix-it to insert a C-style nullability keyword at \p PointerLoc,
+/// taking into account whitespace before and after.
+static void fixItNullability(Sema &S, DiagnosticBuilder &Diag,
+ SourceLocation PointerLoc,
+ NullabilityKind Nullability) {
+ assert(PointerLoc.isValid());
+ if (PointerLoc.isMacroID())
+ return;
+
+ SourceLocation FixItLoc = S.getLocForEndOfToken(PointerLoc);
+ if (!FixItLoc.isValid() || FixItLoc == PointerLoc)
+ return;
+
+ const char *NextChar = S.SourceMgr.getCharacterData(FixItLoc);
+ if (!NextChar)
+ return;
+
+ SmallString<32> InsertionTextBuf{" "};
+ InsertionTextBuf += getNullabilitySpelling(Nullability);
+ InsertionTextBuf += " ";
+ StringRef InsertionText = InsertionTextBuf.str();
+
+ if (isWhitespace(*NextChar)) {
+ InsertionText = InsertionText.drop_back();
+ } else if (NextChar[-1] == '[') {
+ if (NextChar[0] == ']')
+ InsertionText = InsertionText.drop_back().drop_front();
+ else
+ InsertionText = InsertionText.drop_front();
+ } else if (!isIdentifierBody(NextChar[0], /*allow dollar*/true) &&
+ !isIdentifierBody(NextChar[-1], /*allow dollar*/true)) {
+ InsertionText = InsertionText.drop_back().drop_front();
+ }
+
+ Diag << FixItHint::CreateInsertion(FixItLoc, InsertionText);
+}
+
+static void emitNullabilityConsistencyWarning(Sema &S,
+ SimplePointerKind PointerKind,
+ SourceLocation PointerLoc) {
+ assert(PointerLoc.isValid());
+
+ if (PointerKind == SimplePointerKind::Array) {
+ S.Diag(PointerLoc, diag::warn_nullability_missing_array);
+ } else {
+ S.Diag(PointerLoc, diag::warn_nullability_missing)
+ << static_cast<unsigned>(PointerKind);
+ }
+
+ if (PointerLoc.isMacroID())
+ return;
+
+ auto addFixIt = [&](NullabilityKind Nullability) {
+ auto Diag = S.Diag(PointerLoc, diag::note_nullability_fix_it);
+ Diag << static_cast<unsigned>(Nullability);
+ Diag << static_cast<unsigned>(PointerKind);
+ fixItNullability(S, Diag, PointerLoc, Nullability);
+ };
+ addFixIt(NullabilityKind::Nullable);
+ addFixIt(NullabilityKind::NonNull);
+}
+
+/// Complains about missing nullability if the file containing \p pointerLoc
+/// has other uses of nullability (either the keywords or the \c assume_nonnull
+/// pragma).
+///
+/// If the file has \e not seen other uses of nullability, this particular
+/// pointer is saved for possible later diagnosis. See recordNullabilitySeen().
+static void checkNullabilityConsistency(Sema &S,
SimplePointerKind pointerKind,
SourceLocation pointerLoc) {
- Sema &S = state.getSema();
-
// Determine which file we're performing consistency checking for.
FileID file = getNullabilityCompletenessCheckFileID(S, pointerLoc);
if (file.isInvalid())
@@ -3468,10 +3514,16 @@ static void checkNullabilityConsistency(TypeProcessingState &state,
// about anything.
FileNullability &fileNullability = S.NullabilityMap[file];
if (!fileNullability.SawTypeNullability) {
- // If this is the first pointer declarator in the file, record it.
+ // If this is the first pointer declarator in the file, and the appropriate
+ // warning is on, record it in case we need to diagnose it retroactively.
+ diag::kind diagKind;
+ if (pointerKind == SimplePointerKind::Array)
+ diagKind = diag::warn_nullability_missing_array;
+ else
+ diagKind = diag::warn_nullability_missing;
+
if (fileNullability.PointerLoc.isInvalid() &&
- !S.Context.getDiagnostics().isIgnored(diag::warn_nullability_missing,
- pointerLoc)) {
+ !S.Context.getDiagnostics().isIgnored(diagKind, pointerLoc)) {
fileNullability.PointerLoc = pointerLoc;
fileNullability.PointerKind = static_cast<unsigned>(pointerKind);
}
@@ -3480,8 +3532,66 @@ static void checkNullabilityConsistency(TypeProcessingState &state,
}
// Complain about missing nullability.
- S.Diag(pointerLoc, diag::warn_nullability_missing)
- << static_cast<unsigned>(pointerKind);
+ emitNullabilityConsistencyWarning(S, pointerKind, pointerLoc);
+}
+
+/// Marks that a nullability feature has been used in the file containing
+/// \p loc.
+///
+/// If this file already had pointer types in it that were missing nullability,
+/// the first such instance is retroactively diagnosed.
+///
+/// \sa checkNullabilityConsistency
+static void recordNullabilitySeen(Sema &S, SourceLocation loc) {
+ FileID file = getNullabilityCompletenessCheckFileID(S, loc);
+ if (file.isInvalid())
+ return;
+
+ FileNullability &fileNullability = S.NullabilityMap[file];
+ if (fileNullability.SawTypeNullability)
+ return;
+ fileNullability.SawTypeNullability = true;
+
+ // If we haven't seen any type nullability before, now we have. Retroactively
+ // diagnose the first unannotated pointer, if there was one.
+ if (fileNullability.PointerLoc.isInvalid())
+ return;
+
+ auto kind = static_cast<SimplePointerKind>(fileNullability.PointerKind);
+ emitNullabilityConsistencyWarning(S, kind, fileNullability.PointerLoc);
+}
+
+/// Returns true if any of the declarator chunks before \p endIndex include a
+/// level of indirection: array, pointer, reference, or pointer-to-member.
+///
+/// Because declarator chunks are stored in outer-to-inner order, testing
+/// every chunk before \p endIndex is testing all chunks that embed the current
+/// chunk as part of their type.
+///
+/// It is legal to pass the result of Declarator::getNumTypeObjects() as the
+/// end index, in which case all chunks are tested.
+static bool hasOuterPointerLikeChunk(const Declarator &D, unsigned endIndex) {
+ unsigned i = endIndex;
+ while (i != 0) {
+ // Walk outwards along the declarator chunks.
+ --i;
+ const DeclaratorChunk &DC = D.getTypeObject(i);
+ switch (DC.Kind) {
+ case DeclaratorChunk::Paren:
+ break;
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ return true;
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Pipe:
+ // These are invalid anyway, so just ignore.
+ break;
+ }
+ }
+ return false;
}
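Taken together, the helpers above give -Wnullability-completeness fix-it notes and retroactive reporting; a sketch of the behavior:

    int *global_ptr;               // warning: pointer is missing a nullability
                                   // type specifier; notes offer _Nullable and
                                   // _Nonnull fix-its (diagnosed retroactively
                                   // once the next line is seen)
    int *_Nonnull annotated_ptr;   // first nullability use in this file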
static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
@@ -3561,24 +3671,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Are we in an assume-nonnull region?
bool inAssumeNonNullRegion = false;
- if (S.PP.getPragmaAssumeNonNullLoc().isValid()) {
+ SourceLocation assumeNonNullLoc = S.PP.getPragmaAssumeNonNullLoc();
+ if (assumeNonNullLoc.isValid()) {
inAssumeNonNullRegion = true;
- // Determine which file we saw the assume-nonnull region in.
- FileID file = getNullabilityCompletenessCheckFileID(
- S, S.PP.getPragmaAssumeNonNullLoc());
- if (file.isValid()) {
- FileNullability &fileNullability = S.NullabilityMap[file];
-
- // If we haven't seen any type nullability before, now we have.
- if (!fileNullability.SawTypeNullability) {
- if (fileNullability.PointerLoc.isValid()) {
- S.Diag(fileNullability.PointerLoc, diag::warn_nullability_missing)
- << static_cast<unsigned>(fileNullability.PointerKind);
- }
-
- fileNullability.SawTypeNullability = true;
- }
- }
+ recordNullabilitySeen(S, assumeNonNullLoc);
}
// Whether to complain about missing nullability specifiers or not.
@@ -3593,6 +3689,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
CAMN_Yes
} complainAboutMissingNullability = CAMN_No;
unsigned NumPointersRemaining = 0;
+ auto complainAboutInferringWithinChunk = PointerWrappingDeclaratorKind::None;
if (IsTypedefName) {
// For typedefs, we do not infer any nullability (the default),
@@ -3600,7 +3697,17 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// inner pointers.
complainAboutMissingNullability = CAMN_InnerPointers;
- if (T->canHaveNullability() && !T->getNullability(S.Context)) {
+ auto isDependentNonPointerType = [](QualType T) -> bool {
+ // Note: This is intended to be the same check as Type::canHaveNullability
+ // except with all of the ambiguous cases being treated as 'false' rather
+ // than 'true'.
+ return T->isDependentType() && !T->isAnyPointerType() &&
+ !T->isBlockPointerType() && !T->isMemberPointerType();
+ };
+
+ if (T->canHaveNullability() && !T->getNullability(S.Context) &&
+ !isDependentNonPointerType(T)) {
+ // Note that we allow but don't require nullability on dependent types.
++NumPointersRemaining;
}
@@ -3651,11 +3758,12 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// fallthrough
case Declarator::FileContext:
- case Declarator::KNRTypeListContext:
+ case Declarator::KNRTypeListContext: {
complainAboutMissingNullability = CAMN_Yes;
// Nullability inference depends on the type and declarator.
- switch (classifyPointerDeclarator(S, T, D)) {
+ auto wrappingKind = PointerWrappingDeclaratorKind::None;
+ switch (classifyPointerDeclarator(S, T, D, wrappingKind)) {
case PointerDeclaratorKind::NonPointer:
case PointerDeclaratorKind::MultiLevelPointer:
// Cannot infer nullability.
@@ -3664,6 +3772,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case PointerDeclaratorKind::SingleLevelPointer:
// Infer _Nonnull if we are in an assumes-nonnull region.
if (inAssumeNonNullRegion) {
+ complainAboutInferringWithinChunk = wrappingKind;
inferNullability = NullabilityKind::NonNull;
inferNullabilityCS = (context == Declarator::ObjCParameterContext ||
context == Declarator::ObjCResultContext);
@@ -3704,6 +3813,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
}
break;
+ }
case Declarator::ConversionIdContext:
complainAboutMissingNullability = CAMN_Yes;
@@ -3729,6 +3839,23 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
+ // Local function that returns true if its argument looks like a va_list.
+ auto isVaList = [&S](QualType T) -> bool {
+ auto *typedefTy = T->getAs<TypedefType>();
+ if (!typedefTy)
+ return false;
+ TypedefDecl *vaListTypedef = S.Context.getBuiltinVaListDecl();
+ do {
+ if (typedefTy->getDecl() == vaListTypedef)
+ return true;
+ if (auto *name = typedefTy->getDecl()->getIdentifier())
+ if (name->isStr("va_list"))
+ return true;
+ typedefTy = typedefTy->desugar()->getAs<TypedefType>();
+ } while (typedefTy);
+ return false;
+ };
+
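The isVaList exemption exists because va_list is a pointer or array type on many targets, so warning about its missing nullability would be unactionable noise. Sketch:

    #include <stdarg.h>
    void vlog(const char *_Nonnull fmt, va_list args);
    // 'args' gets no -Wnullability-completeness warning even where va_list
    // desugars to a pointer or array type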
// Local function that checks the nullability for a given pointer declarator.
// Returns true if _Nonnull was inferred.
auto inferPointerNullability = [&](SimplePointerKind pointerKind,
@@ -3762,6 +3889,15 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
->setObjCDeclQualifier(ObjCDeclSpec::DQ_CSNullability);
}
+ if (pointerLoc.isValid() &&
+ complainAboutInferringWithinChunk !=
+ PointerWrappingDeclaratorKind::None) {
+ auto Diag =
+ S.Diag(pointerLoc, diag::warn_nullability_inferred_on_nested_type);
+ Diag << static_cast<int>(complainAboutInferringWithinChunk);
+ fixItNullability(S, Diag, pointerLoc, NullabilityKind::NonNull);
+ }
+
if (inferNullabilityInnerOnly)
inferNullabilityInnerOnlyComplete = true;
return nullabilityAttr;
@@ -3779,27 +3915,42 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Fallthrough.
case CAMN_Yes:
- checkNullabilityConsistency(state, pointerKind, pointerLoc);
+ checkNullabilityConsistency(S, pointerKind, pointerLoc);
}
return nullptr;
};
// If the type itself could have nullability but does not, infer pointer
// nullability and perform consistency checking.
- if (T->canHaveNullability() && S.ActiveTemplateInstantiations.empty() &&
- !T->getNullability(S.Context)) {
- SimplePointerKind pointerKind = SimplePointerKind::Pointer;
- if (T->isBlockPointerType())
- pointerKind = SimplePointerKind::BlockPointer;
- else if (T->isMemberPointerType())
- pointerKind = SimplePointerKind::MemberPointer;
+ if (S.ActiveTemplateInstantiations.empty()) {
+ if (T->canHaveNullability() && !T->getNullability(S.Context)) {
+ if (isVaList(T)) {
+ // Record that we've seen a pointer, but do nothing else.
+ if (NumPointersRemaining > 0)
+ --NumPointersRemaining;
+ } else {
+ SimplePointerKind pointerKind = SimplePointerKind::Pointer;
+ if (T->isBlockPointerType())
+ pointerKind = SimplePointerKind::BlockPointer;
+ else if (T->isMemberPointerType())
+ pointerKind = SimplePointerKind::MemberPointer;
+
+ if (auto *attr = inferPointerNullability(
+ pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
+ D.getMutableDeclSpec().getAttributes().getListRef())) {
+ T = Context.getAttributedType(
+ AttributedType::getNullabilityAttrKind(*inferNullability),T,T);
+ attr->setUsedAsTypeAttr();
+ }
+ }
+ }
- if (auto *attr = inferPointerNullability(
- pointerKind, D.getDeclSpec().getTypeSpecTypeLoc(),
- D.getMutableDeclSpec().getAttributes().getListRef())) {
- T = Context.getAttributedType(
- AttributedType::getNullabilityAttrKind(*inferNullability), T, T);
- attr->setUsedAsTypeAttr();
+ if (complainAboutMissingNullability == CAMN_Yes &&
+ T->isArrayType() && !T->getNullability(S.Context) && !isVaList(T) &&
+ D.isPrototypeContext() &&
+ !hasOuterPointerLikeChunk(D, D.getNumTypeObjects())) {
+ checkNullabilityConsistency(S, SimplePointerKind::Array,
+ D.getDeclSpec().getTypeSpecTypeLoc());
}
}
@@ -3925,31 +4076,13 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// C99 6.7.5.2p1: ... and then only in the outermost array type
// derivation.
- unsigned x = chunkIndex;
- while (x != 0) {
- // Walk outwards along the declarator chunks.
- x--;
- const DeclaratorChunk &DC = D.getTypeObject(x);
- switch (DC.Kind) {
- case DeclaratorChunk::Paren:
- continue;
- case DeclaratorChunk::Array:
- case DeclaratorChunk::Pointer:
- case DeclaratorChunk::Reference:
- case DeclaratorChunk::MemberPointer:
- S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) <<
- (ASM == ArrayType::Static ? "'static'" : "type qualifier");
- if (ASM == ArrayType::Static)
- ASM = ArrayType::Normal;
- ATI.TypeQuals = 0;
- D.setInvalidType(true);
- break;
- case DeclaratorChunk::Function:
- case DeclaratorChunk::BlockPointer:
- case DeclaratorChunk::Pipe:
- // These are invalid anyway, so just ignore.
- break;
- }
+ if (hasOuterPointerLikeChunk(D, chunkIndex)) {
+ S.Diag(DeclType.Loc, diag::err_array_static_not_outermost) <<
+ (ASM == ArrayType::Static ? "'static'" : "type qualifier");
+ if (ASM == ArrayType::Static)
+ ASM = ArrayType::Normal;
+ ATI.TypeQuals = 0;
+ D.setInvalidType(true);
}
}
const AutoType *AT = T->getContainedAutoType();
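The hasOuterPointerLikeChunk helper above preserves the C99 6.7.5.2p1 rule that the old hand-written loop enforced. A short C illustration (diagnostic wording paraphrased):

    void ok(int a[static 10]);       /* 'static' in the outermost array
                                        derivation: accepted */
    void bad(int a[10][static 20]);  /* error: 'static' may only appear
                                        in the outermost array type
                                        derivation */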
@@ -3964,6 +4097,16 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
break;
}
+ // Array parameters can be marked nullable as well, although it's not
+ // necessary if they're marked 'static'.
+ if (complainAboutMissingNullability == CAMN_Yes &&
+ !hasNullabilityAttr(DeclType.getAttrs()) &&
+ ASM != ArrayType::Static &&
+ D.isPrototypeContext() &&
+ !hasOuterPointerLikeChunk(D, chunkIndex)) {
+ checkNullabilityConsistency(S, SimplePointerKind::Array, DeclType.Loc);
+ }
+
T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals,
SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
break;
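A hedged sketch of the check added above, assuming the bracket spelling Clang accepts for array-parameter qualifiers: in a prototype, an array parameter is now expected to carry a nullability qualifier unless 'static' already promises a non-null argument.

    void f(int values[_Nonnull], unsigned len);  /* explicitly annotated */
    void g(int values[static 4]);                /* 'static' guarantees a
                                                    non-null argument, so no
                                                    annotation is required */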
@@ -4032,7 +4175,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// FIXME: This really should be in BuildFunctionType.
if (T->isHalfType()) {
if (S.getLangOpts().OpenCL) {
- if (!S.getOpenCLOptions().cl_khr_fp16) {
+ if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return)
<< T << 0 /*pointer hint*/;
D.setInvalidType(true);
@@ -4044,13 +4187,26 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
+ if (LangOpts.OpenCL) {
// OpenCL v2.0 s6.12.5 - A block cannot be the return value of a
// function.
- if (LangOpts.OpenCL && (T->isBlockPointerType() || T->isImageType() ||
- T->isSamplerT() || T->isPipeType())) {
- S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return)
- << T << 1 /*hint off*/;
- D.setInvalidType(true);
+ if (T->isBlockPointerType() || T->isImageType() || T->isSamplerT() ||
+ T->isPipeType()) {
+ S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return)
+ << T << 1 /*hint off*/;
+ D.setInvalidType(true);
+ }
+ // OpenCL doesn't support variadic functions and blocks
+ // (s6.9.e and s6.12.5 OpenCL v2.0) except for printf.
+      // We also allow any toolchain-reserved identifiers here.
+ if (FTI.isVariadic &&
+ !(D.getIdentifier() &&
+ ((D.getIdentifier()->getName() == "printf" &&
+ LangOpts.OpenCLVersion >= 120) ||
+ D.getIdentifier()->getName().startswith("__")))) {
+ S.Diag(D.getIdentifierLoc(), diag::err_opencl_variadic_function);
+ D.setInvalidType(true);
+ }
}
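A hedged OpenCL C sketch of the rule enforced above; the '__vendor_trace' name is invented for illustration:

    int printf(__constant const char *fmt, ...); /* allowed: printf, in
                                                    OpenCL 1.2 and later */
    void __vendor_trace(int n, ...);             /* allowed: '__' names are
                                                    reserved for toolchains */
    void gather(int n, ...);                     /* error: variadic functions
                                                    are not supported */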
// Methods cannot return interface types. All ObjC objects are
@@ -4143,7 +4299,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Exception specs are not allowed in typedefs. Complain, but add it
// anyway.
- if (IsTypedefName && FTI.getExceptionSpecType())
+ if (IsTypedefName && FTI.getExceptionSpecType() && !LangOpts.CPlusPlus1z)
S.Diag(FTI.getExceptionSpecLocBeg(),
diag::err_exception_spec_in_typedef)
<< (D.getContext() == Declarator::AliasDeclContext ||
@@ -4154,6 +4310,19 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (FTI.isAmbiguous)
warnAboutAmbiguousFunction(S, D, DeclType, T);
+ // GNU warning -Wstrict-prototypes
+ // Warn if a function declaration is without a prototype.
+ // This warning is issued for all kinds of unprototyped function
+ // declarations (i.e. function type typedef, function pointer etc.)
+ // C99 6.7.5.3p14:
+ // The empty list in a function declarator that is not part of a
+ // definition of that function specifies that no information
+ // about the number or types of the parameters is supplied.
+ if (D.getFunctionDefinitionKind() == FDK_Declaration &&
+ FTI.NumParams == 0 && !LangOpts.CPlusPlus)
+ S.Diag(DeclType.Loc, diag::warn_strict_prototypes)
+ << 0 << FixItHint::CreateInsertion(FTI.getRParenLoc(), "void");
+
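For reference, a small C example of what the new -Wstrict-prototypes warning flags, together with the fix-it it attaches (wording paraphrased):

    int count();      /* warning: declaration is not a prototype;
                         fix-it inserts 'void' between the parentheses */
    int count(void);  /* after the fix-it: a proper prototype */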
FunctionType::ExtInfo EI(getCCForDeclaratorChunk(S, D, FTI, chunkIndex));
if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus) {
@@ -4239,7 +4408,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Disallow half FP parameters.
// FIXME: This really should be in BuildFunctionType.
if (S.getLangOpts().OpenCL) {
- if (!S.getOpenCLOptions().cl_khr_fp16) {
+ if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
S.Diag(Param->getLocation(),
diag::err_opencl_half_param) << ParamTy;
D.setInvalidType();
@@ -4290,7 +4459,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
if (FTI.getExceptionSpecType() == EST_Dynamic) {
// FIXME: It's rather inefficient to have to split into two vectors
// here.
- unsigned N = FTI.NumExceptions;
+ unsigned N = FTI.getNumExceptions();
DynamicExceptions.reserve(N);
DynamicExceptionRanges.reserve(N);
for (unsigned I = 0; I != N; ++I) {
@@ -4374,7 +4543,9 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
case DeclaratorChunk::Pipe: {
- T = S.BuildPipeType(T, DeclType.Loc );
+ T = S.BuildReadPipeType(T, DeclType.Loc);
+ processTypeAttrs(state, T, TAL_DeclSpec,
+ D.getDeclSpec().getAttributes().getList());
break;
}
}
@@ -4738,6 +4909,8 @@ static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
return AttributeList::AT_StdCall;
case AttributedType::attr_thiscall:
return AttributeList::AT_ThisCall;
+ case AttributedType::attr_regcall:
+ return AttributeList::AT_RegCall;
case AttributedType::attr_pascal:
return AttributeList::AT_Pascal;
case AttributedType::attr_swiftcall:
@@ -4908,11 +5081,9 @@ namespace {
TL.getWrittenBuiltinSpecs() = DS.getWrittenBuiltinSpecs();
// Try to have a meaningful source location.
if (TL.getWrittenSignSpec() != TSS_unspecified)
- // Sign spec loc overrides the others (e.g., 'unsigned long').
- TL.setBuiltinLoc(DS.getTypeSpecSignLoc());
- else if (TL.getWrittenWidthSpec() != TSW_unspecified)
- // Width spec loc overrides type spec loc (e.g., 'short int').
- TL.setBuiltinLoc(DS.getTypeSpecWidthLoc());
+ TL.expandBuiltinRange(DS.getTypeSpecSignLoc());
+ if (TL.getWrittenWidthSpec() != TSW_unspecified)
+ TL.expandBuiltinRange(DS.getTypeSpecWidthRange());
}
}
void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
@@ -5537,7 +5708,7 @@ static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
if (Class->isArcWeakrefUnavailable()) {
S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
S.Diag(ObjT->getInterfaceDecl()->getLocation(),
- diag::note_class_declared);
+ diag::note_class_declared);
}
}
}
@@ -5811,23 +5982,9 @@ static bool handleMSPointerTypeQualifierAttr(TypeProcessingState &State,
bool Sema::checkNullabilityTypeSpecifier(QualType &type,
NullabilityKind nullability,
SourceLocation nullabilityLoc,
- bool isContextSensitive) {
- // We saw a nullability type specifier. If this is the first one for
- // this file, note that.
- FileID file = getNullabilityCompletenessCheckFileID(*this, nullabilityLoc);
- if (!file.isInvalid()) {
- FileNullability &fileNullability = NullabilityMap[file];
- if (!fileNullability.SawTypeNullability) {
- // If we have already seen a pointer declarator without a nullability
- // annotation, complain about it.
- if (fileNullability.PointerLoc.isValid()) {
- Diag(fileNullability.PointerLoc, diag::warn_nullability_missing)
- << static_cast<unsigned>(fileNullability.PointerKind);
- }
-
- fileNullability.SawTypeNullability = true;
- }
- }
+ bool isContextSensitive,
+ bool allowOnArrayType) {
+ recordNullabilitySeen(*this, nullabilityLoc);
// Check for existing nullability attributes on the type.
QualType desugared = type;
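recordNullabilitySeen folds the bookkeeping that used to be written out inline here into a helper; the user-visible contract is unchanged. A C sketch of that contract (diagnostic wording paraphrased):

    void first(int *p);            /* diagnosed retroactively: pointer is
                                      missing a nullability specifier */
    void second(int *_Nonnull q);  /* the first nullability specifier seen
                                      in the file triggers the check */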
@@ -5881,7 +6038,8 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
}
// If this definitely isn't a pointer type, reject the specifier.
- if (!desugared->canHaveNullability()) {
+ if (!desugared->canHaveNullability() &&
+ !(allowOnArrayType && desugared->isArrayType())) {
Diag(nullabilityLoc, diag::err_nullability_nonpointer)
<< DiagNullabilityKind(nullability, isContextSensitive) << type;
return true;
@@ -5891,7 +6049,12 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
// attributes, require that the type be a single-level pointer.
if (isContextSensitive) {
// Make sure that the pointee isn't itself a pointer type.
- QualType pointeeType = desugared->getPointeeType();
+ const Type *pointeeType;
+ if (desugared->isArrayType())
+ pointeeType = desugared->getArrayElementTypeNoTypeQual();
+ else
+ pointeeType = desugared->getPointeeType().getTypePtr();
+
if (pointeeType->isAnyPointerType() ||
pointeeType->isObjCObjectPointerType() ||
pointeeType->isMemberPointerType()) {
@@ -5914,6 +6077,13 @@ bool Sema::checkNullabilityTypeSpecifier(QualType &type,
}
bool Sema::checkObjCKindOfType(QualType &type, SourceLocation loc) {
+ if (isa<ObjCTypeParamType>(type)) {
+ // Build the attributed type to record where __kindof occurred.
+ type = Context.getAttributedType(AttributedType::attr_objc_kindof,
+ type, type);
+ return false;
+ }
+
// Find out if it's an Objective-C object or object pointer type;
const ObjCObjectPointerType *ptrType = type->getAs<ObjCObjectPointerType>();
const ObjCObjectType *objType = ptrType ? ptrType->getObjectType()
@@ -6070,6 +6240,8 @@ static AttributedType::Kind getCCTypeAttrKind(AttributeList &Attr) {
return AttributedType::attr_stdcall;
case AttributeList::AT_ThisCall:
return AttributedType::attr_thiscall;
+ case AttributeList::AT_RegCall:
+ return AttributedType::attr_regcall;
case AttributeList::AT_Pascal:
return AttributedType::attr_pascal;
case AttributeList::AT_SwiftCall:
@@ -6523,6 +6695,11 @@ static void HandleOpenCLAccessAttr(QualType &CurType, const AttributeList &Attr,
S.Diag(TypedefTy->getDecl()->getLocStart(),
diag::note_opencl_typedef_access_qualifier) << PrevAccessQual;
+ } else if (CurType->isPipeType()) {
+ if (Attr.getSemanticSpelling() == OpenCLAccessAttr::Keyword_write_only) {
+ QualType ElemType = CurType->getAs<PipeType>()->getElementType();
+ CurType = S.Context.getWritePipeType(ElemType);
+ }
}
}
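A hedged OpenCL C sketch of the access-qualifier handling above: a pipe parameter starts out as a read pipe, and the write_only qualifier rebuilds it as a write pipe.

    kernel void producer(write_only pipe int out) { /* reserve/commit */ }
    kernel void consumer(read_only  pipe int in)  { /* read packets */ }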
@@ -6637,12 +6814,22 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// don't want to distribute the nullability specifier past any
// dependent type, because that complicates the user model.
if (type->canHaveNullability() || type->isDependentType() ||
+ type->isArrayType() ||
!distributeNullabilityTypeAttr(state, type, attr)) {
+ unsigned endIndex;
+ if (TAL == TAL_DeclChunk)
+ endIndex = state.getCurrentChunkIndex();
+ else
+ endIndex = state.getDeclarator().getNumTypeObjects();
+ bool allowOnArrayType =
+ state.getDeclarator().isPrototypeContext() &&
+ !hasOuterPointerLikeChunk(state.getDeclarator(), endIndex);
if (state.getSema().checkNullabilityTypeSpecifier(
type,
mapNullabilityAttrKind(attr.getKind()),
attr.getLoc(),
- attr.isContextSensitiveKeywordAttribute())) {
+ attr.isContextSensitiveKeywordAttribute(),
+ allowOnArrayType)) {
attr.setInvalid();
}
@@ -6879,6 +7066,14 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
return false;
}
D = ED->getDefinition();
+ } else if (auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (auto *Pattern = FD->getTemplateInstantiationPattern())
+ FD = Pattern;
+ D = FD->getDefinition();
+ } else if (auto *VD = dyn_cast<VarDecl>(D)) {
+ if (auto *Pattern = VD->getTemplateInstantiationPattern())
+ VD = Pattern;
+ D = VD->getDefinition();
}
assert(D && "missing definition for pattern of instantiated definition");
@@ -6886,7 +7081,7 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
if (isVisible(D))
return true;
- // The external source may have additional definitions of this type that are
+ // The external source may have additional definitions of this entity that are
// visible, so complete the redeclaration chain now and ask again.
if (auto *Source = Context.getExternalSource()) {
Source->CompleteRedeclChain(D);
diff --git a/lib/Sema/TreeTransform.h b/lib/Sema/TreeTransform.h
index 7224eef848de..3ab6019f0ec3 100644
--- a/lib/Sema/TreeTransform.h
+++ b/lib/Sema/TreeTransform.h
@@ -457,6 +457,10 @@ public:
return cast_or_null<NamedDecl>(getDerived().TransformDecl(Loc, D));
}
+ /// Transform the set of declarations in an OverloadExpr.
+ bool TransformOverloadExprDecls(OverloadExpr *Old, bool RequiresADL,
+ LookupResult &R);
+
/// \brief Transform the given nested-name-specifier with source-location
/// information.
///
@@ -699,6 +703,12 @@ public:
QualType RebuildMemberPointerType(QualType PointeeType, QualType ClassType,
SourceLocation Sigil);
+ QualType RebuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
+ SourceLocation ProtocolLAngleLoc,
+ ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc);
+
/// \brief Build an Objective-C object type.
///
/// By default, performs semantic analysis when building the object type.
@@ -815,7 +825,7 @@ public:
/// \brief Rebuild an unresolved typename type, given the decl that
/// the UnresolvedUsingTypenameDecl was transformed to.
- QualType RebuildUnresolvedUsingType(Decl *D);
+ QualType RebuildUnresolvedUsingType(SourceLocation NameLoc, Decl *D);
/// \brief Build a new typedef type.
QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
@@ -1007,11 +1017,9 @@ public:
case LookupResult::FoundOverloaded:
case LookupResult::FoundUnresolvedValue: {
NamedDecl *SomeDecl = Result.getRepresentativeDecl();
- unsigned Kind = 0;
- if (isa<TypedefDecl>(SomeDecl)) Kind = 1;
- else if (isa<TypeAliasDecl>(SomeDecl)) Kind = 2;
- else if (isa<ClassTemplateDecl>(SomeDecl)) Kind = 3;
- SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag) << Kind;
+ Sema::NonTagKind NTK = SemaRef.getNonTagTypeDeclKind(SomeDecl, Kind);
+ SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag) << SomeDecl
+ << NTK << Kind;
SemaRef.Diag(SomeDecl->getLocation(), diag::note_declared_at);
break;
}
@@ -1056,7 +1064,8 @@ public:
QualType RebuildAtomicType(QualType ValueType, SourceLocation KWLoc);
/// \brief Build a new pipe type given its value type.
- QualType RebuildPipeType(QualType ValueType, SourceLocation KWLoc);
+ QualType RebuildPipeType(QualType ValueType, SourceLocation KWLoc,
+ bool isReadPipe);
/// \brief Build a new template name given a nested name specifier, a flag
/// indicating whether the "template" keyword was provided, and the template
@@ -3216,6 +3225,9 @@ ExprResult TreeTransform<Derived>::TransformInitializer(Expr *Init,
if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
Init = ExprTemp->getSubExpr();
+ if (auto *AIL = dyn_cast<ArrayInitLoopExpr>(Init))
+ Init = AIL->getCommonExpr();
+
if (MaterializeTemporaryExpr *MTE = dyn_cast<MaterializeTemporaryExpr>(Init))
Init = MTE->GetTemporaryExpr();
@@ -3438,15 +3450,13 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
NestedNameSpecifier *QNNS = Q.getNestedNameSpecifier();
switch (QNNS->getKind()) {
- case NestedNameSpecifier::Identifier:
- if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr,
- *QNNS->getAsIdentifier(),
- Q.getLocalBeginLoc(),
- Q.getLocalEndLoc(),
- ObjectType, false, SS,
- FirstQualifierInScope, false))
+ case NestedNameSpecifier::Identifier: {
+ Sema::NestedNameSpecInfo IdInfo(QNNS->getAsIdentifier(),
+ Q.getLocalBeginLoc(), Q.getLocalEndLoc(), ObjectType);
+ if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr, IdInfo, false,
+ SS, FirstQualifierInScope, false))
return NestedNameSpecifierLoc();
-
+ }
break;
case NestedNameSpecifier::Namespace: {
@@ -5118,6 +5128,8 @@ bool TreeTransform<Derived>::TransformExceptionSpec(
}
ESI.Exceptions = Exceptions;
+ if (ESI.Exceptions.empty())
+ ESI.Type = EST_DynamicNone;
return false;
}
@@ -5153,7 +5165,7 @@ TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || D != T->getDecl()) {
- Result = getDerived().RebuildUnresolvedUsingType(D);
+ Result = getDerived().RebuildUnresolvedUsingType(TL.getNameLoc(), D);
if (Result.isNull())
return QualType();
}
@@ -5480,7 +5492,9 @@ QualType TreeTransform<Derived>::TransformPipeType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || ValueType != TL.getValueLoc().getType()) {
- Result = getDerived().RebuildPipeType(ValueType, TL.getKWLoc());
+ const PipeType *PT = Result->getAs<PipeType>();
+ bool isReadPipe = PT->isReadOnly();
+ Result = getDerived().RebuildPipeType(ValueType, TL.getKWLoc(), isReadPipe);
if (Result.isNull())
return QualType();
}
@@ -5699,7 +5713,9 @@ TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
if (TypeAliasTemplateDecl *TAT = dyn_cast_or_null<TypeAliasTemplateDecl>(
Template.getAsTemplateDecl())) {
SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(),
- diag::err_tag_reference_non_tag) << 4;
+ diag::err_tag_reference_non_tag)
+ << TAT << Sema::NTK_TypeAliasTemplate
+ << ElaboratedType::getTagTypeKindForKeyword(T->getKeyword());
SemaRef.Diag(TAT->getLocation(), diag::note_declared_at);
}
}
@@ -5946,6 +5962,39 @@ TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB,
template<typename Derived>
QualType
+TreeTransform<Derived>::TransformObjCTypeParamType(TypeLocBuilder &TLB,
+ ObjCTypeParamTypeLoc TL) {
+ const ObjCTypeParamType *T = TL.getTypePtr();
+ ObjCTypeParamDecl *OTP = cast_or_null<ObjCTypeParamDecl>(
+ getDerived().TransformDecl(T->getDecl()->getLocation(), T->getDecl()));
+ if (!OTP)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ OTP != T->getDecl()) {
+ Result = getDerived().RebuildObjCTypeParamType(OTP,
+ TL.getProtocolLAngleLoc(),
+ llvm::makeArrayRef(TL.getTypePtr()->qual_begin(),
+ TL.getNumProtocols()),
+ TL.getProtocolLocs(),
+ TL.getProtocolRAngleLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ObjCTypeParamTypeLoc NewTL = TLB.push<ObjCTypeParamTypeLoc>(Result);
+ if (TL.getNumProtocols()) {
+ NewTL.setProtocolLAngleLoc(TL.getProtocolLAngleLoc());
+ for (unsigned i = 0, n = TL.getNumProtocols(); i != n; ++i)
+ NewTL.setProtocolLoc(i, TL.getProtocolLoc(i));
+ NewTL.setProtocolRAngleLoc(TL.getProtocolRAngleLoc());
+ }
+ return Result;
+}
+
+template<typename Derived>
+QualType
TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
ObjCObjectTypeLoc TL) {
// Transform base type.
@@ -6617,6 +6666,7 @@ template<typename Derived>
StmtResult
TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
// The coroutine body should be re-formed by the caller if necessary.
+ // FIXME: The coroutine body is always rebuilt by ActOnFinishFunctionBody
return getDerived().TransformStmt(S->getBody());
}
@@ -7626,6 +7676,96 @@ StmtResult TreeTransform<Derived>::TransformOMPTargetParallelForSimdDirective(
return Res;
}
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTargetSimdDirective(
+ OMPTargetSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_simd, DirName, nullptr,
+ D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeDirective(
+ OMPTeamsDistributeDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_distribute, DirName,
+ nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeSimdDirective(
+ OMPTeamsDistributeSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_teams_distribute_simd, DirName, nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeParallelForSimdDirective(
+ OMPTeamsDistributeParallelForSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_teams_distribute_parallel_for_simd, DirName, nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTeamsDistributeParallelForDirective(
+ OMPTeamsDistributeParallelForDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_distribute_parallel_for,
+ DirName, nullptr, D->getLocStart());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsDirective(
+ OMPTargetTeamsDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams, DirName,
+ nullptr, D->getLocStart());
+ auto Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsDistributeDirective(
+ OMPTargetTeamsDistributeDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams_distribute,
+ DirName, nullptr, D->getLocStart());
+ auto Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTargetTeamsDistributeParallelForDirective(
+ OMPTargetTeamsDistributeParallelForDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_target_teams_distribute_parallel_for, DirName, nullptr,
+ D->getLocStart());
+ auto Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//
@@ -8866,6 +9006,19 @@ TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
Desig.AddDesignator(Designator::getField(D.getFieldName(),
D.getDotLoc(),
D.getFieldLoc()));
+ if (D.getField()) {
+ FieldDecl *Field = cast_or_null<FieldDecl>(
+ getDerived().TransformDecl(D.getFieldLoc(), D.getField()));
+ if (Field != D.getField())
+ // Rebuild the expression when the transformed FieldDecl is
+        // different from the already assigned FieldDecl.
+ ExprChanged = true;
+ } else {
+ // Ensure that the designator expression is rebuilt when there isn't
+ // a resolved FieldDecl in the designator as we don't want to assign
+        // a resolved FieldDecl in the designator, as we don't want to assign
+ ExprChanged = true;
+ }
continue;
}
@@ -8935,6 +9088,20 @@ TreeTransform<Derived>::TransformNoInitExpr(
template<typename Derived>
ExprResult
+TreeTransform<Derived>::TransformArrayInitLoopExpr(ArrayInitLoopExpr *E) {
+ llvm_unreachable("Unexpected ArrayInitLoopExpr outside of initializer");
+ return ExprError();
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ llvm_unreachable("Unexpected ArrayInitIndexExpr outside of initializer");
+ return ExprError();
+}
+
+template<typename Derived>
+ExprResult
TreeTransform<Derived>::TransformImplicitValueInitExpr(
ImplicitValueInitExpr *E) {
TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());
@@ -9655,44 +9822,72 @@ TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
Destroyed);
}
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformUnresolvedLookupExpr(
- UnresolvedLookupExpr *Old) {
- LookupResult R(SemaRef, Old->getName(), Old->getNameLoc(),
- Sema::LookupOrdinaryName);
-
+template <typename Derived>
+bool TreeTransform<Derived>::TransformOverloadExprDecls(OverloadExpr *Old,
+ bool RequiresADL,
+ LookupResult &R) {
// Transform all the decls.
- for (UnresolvedLookupExpr::decls_iterator I = Old->decls_begin(),
- E = Old->decls_end(); I != E; ++I) {
- NamedDecl *InstD = static_cast<NamedDecl*>(
- getDerived().TransformDecl(Old->getNameLoc(),
- *I));
+ bool AllEmptyPacks = true;
+ for (auto *OldD : Old->decls()) {
+ Decl *InstD = getDerived().TransformDecl(Old->getNameLoc(), OldD);
if (!InstD) {
// Silently ignore these if a UsingShadowDecl instantiated to nothing.
// This can happen because of dependent hiding.
- if (isa<UsingShadowDecl>(*I))
+ if (isa<UsingShadowDecl>(OldD))
continue;
else {
R.clear();
- return ExprError();
+ return true;
}
}
+ // Expand using pack declarations.
+ NamedDecl *SingleDecl = cast<NamedDecl>(InstD);
+ ArrayRef<NamedDecl*> Decls = SingleDecl;
+ if (auto *UPD = dyn_cast<UsingPackDecl>(InstD))
+ Decls = UPD->expansions();
+
// Expand using declarations.
- if (isa<UsingDecl>(InstD)) {
- UsingDecl *UD = cast<UsingDecl>(InstD);
- for (auto *I : UD->shadows())
- R.addDecl(I);
- continue;
+ for (auto *D : Decls) {
+ if (auto *UD = dyn_cast<UsingDecl>(D)) {
+ for (auto *SD : UD->shadows())
+ R.addDecl(SD);
+ } else {
+ R.addDecl(D);
+ }
}
- R.addDecl(InstD);
+ AllEmptyPacks &= Decls.empty();
+  }
+
+ // C++ [temp.res]/8.4.2:
+ // The program is ill-formed, no diagnostic required, if [...] lookup for
+ // a name in the template definition found a using-declaration, but the
+  //   lookup in the corresponding scope in the instantiation does not find
+  //   any declarations because the using-declaration was a pack expansion and
+  //   the corresponding pack is empty.
+ if (AllEmptyPacks && !RequiresADL) {
+ getSema().Diag(Old->getNameLoc(), diag::err_using_pack_expansion_empty)
+ << isa<UnresolvedMemberExpr>(Old) << Old->getNameInfo().getName();
+ return true;
}
// Resolve a kind, but don't do any further analysis. If it's
// ambiguous, the callee needs to deal with it.
R.resolveKind();
+ return false;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnresolvedLookupExpr(
+ UnresolvedLookupExpr *Old) {
+ LookupResult R(SemaRef, Old->getName(), Old->getNameLoc(),
+ Sema::LookupOrdinaryName);
+
+ // Transform the declaration set.
+ if (TransformOverloadExprDecls(Old, Old->requiresADL(), R))
+ return ExprError();
// Rebuild the nested-name qualifier, if present.
CXXScopeSpec SS;
@@ -10222,9 +10417,23 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
CXXMethodDecl *NewCallOperator = getSema().startLambdaDefinition(
Class, E->getIntroducerRange(), NewCallOpTSI,
E->getCallOperator()->getLocEnd(),
- NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams());
+ NewCallOpTSI->getTypeLoc().castAs<FunctionProtoTypeLoc>().getParams(),
+ E->getCallOperator()->isConstexpr());
+
LSI->CallOperator = NewCallOperator;
+ for (unsigned I = 0, NumParams = NewCallOperator->getNumParams();
+ I != NumParams; ++I) {
+ auto *P = NewCallOperator->getParamDecl(I);
+ if (P->hasUninstantiatedDefaultArg()) {
+ EnterExpressionEvaluationContext Eval(
+ getSema(), Sema::PotentiallyEvaluatedIfUsed, P);
+ ExprResult R = getDerived().TransformExpr(
+ E->getCallOperator()->getParamDecl(I)->getDefaultArg());
+ P->setDefaultArg(R.get());
+ }
+ }
+
getDerived().transformAttrs(E->getCallOperator(), NewCallOperator);
getDerived().transformedLocalDecl(E->getCallOperator(), NewCallOperator);
@@ -10546,35 +10755,9 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
LookupResult R(SemaRef, Old->getMemberNameInfo(),
Sema::LookupOrdinaryName);
- // Transform all the decls.
- for (UnresolvedMemberExpr::decls_iterator I = Old->decls_begin(),
- E = Old->decls_end(); I != E; ++I) {
- NamedDecl *InstD = static_cast<NamedDecl*>(
- getDerived().TransformDecl(Old->getMemberLoc(),
- *I));
- if (!InstD) {
- // Silently ignore these if a UsingShadowDecl instantiated to nothing.
- // This can happen because of dependent hiding.
- if (isa<UsingShadowDecl>(*I))
- continue;
- else {
- R.clear();
- return ExprError();
- }
- }
-
- // Expand using declarations.
- if (isa<UsingDecl>(InstD)) {
- UsingDecl *UD = cast<UsingDecl>(InstD);
- for (auto *I : UD->shadows())
- R.addDecl(I);
- continue;
- }
-
- R.addDecl(InstD);
- }
-
- R.resolveKind();
+ // Transform the declaration set.
+ if (TransformOverloadExprDecls(Old, /*RequiresADL*/false, R))
+ return ExprError();
// Determine the naming class.
if (Old->getNamingClass()) {
@@ -10704,6 +10887,51 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
E->getRParenLoc(), None, None);
}
+ // Try to compute the result without performing a partial substitution.
+ Optional<unsigned> Result = 0;
+ for (const TemplateArgument &Arg : PackArgs) {
+ if (!Arg.isPackExpansion()) {
+ Result = *Result + 1;
+ continue;
+ }
+
+ TemplateArgumentLoc ArgLoc;
+ InventTemplateArgumentLoc(Arg, ArgLoc);
+
+ // Find the pattern of the pack expansion.
+ SourceLocation Ellipsis;
+ Optional<unsigned> OrigNumExpansions;
+ TemplateArgumentLoc Pattern =
+ getSema().getTemplateArgumentPackExpansionPattern(ArgLoc, Ellipsis,
+ OrigNumExpansions);
+
+ // Substitute under the pack expansion. Do not expand the pack (yet).
+ TemplateArgumentLoc OutPattern;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ if (getDerived().TransformTemplateArgument(Pattern, OutPattern,
+ /*Uneval*/ true))
+ return true;
+
+ // See if we can determine the number of arguments from the result.
+ Optional<unsigned> NumExpansions =
+ getSema().getFullyPackExpandedSize(OutPattern.getArgument());
+ if (!NumExpansions) {
+ // No: we must be in an alias template expansion, and we're going to need
+ // to actually expand the packs.
+ Result = None;
+ break;
+ }
+
+ Result = *Result + *NumExpansions;
+ }
+
+ // Common case: we could determine the number of expansions without
+ // substituting.
+ if (Result)
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), E->getPack(),
+ E->getPackLoc(),
+ E->getRParenLoc(), *Result, None);
+
TemplateArgumentListInfo TransformedPackArgs(E->getPackLoc(),
E->getPackLoc());
{
@@ -10716,6 +10944,8 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
return ExprError();
}
+ // Check whether we managed to fully-expand the pack.
+ // FIXME: Is it possible for us to do so and not hit the early exit path?
SmallVector<TemplateArgument, 8> Args;
bool PartialSubstitution = false;
for (auto &Loc : TransformedPackArgs.arguments()) {
@@ -11152,6 +11382,9 @@ TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
}
else if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
E->getReceiverKind() == ObjCMessageExpr::SuperInstance) {
+ if (!E->getMethodDecl())
+ return ExprError();
+
// Build a new class message send to 'super'.
SmallVector<SourceLocation, 16> SelLocs;
E->getSelectorLocs(SelLocs);
@@ -11476,6 +11709,19 @@ TreeTransform<Derived>::RebuildMemberPointerType(QualType PointeeType,
}
template<typename Derived>
+QualType TreeTransform<Derived>::RebuildObjCTypeParamType(
+ const ObjCTypeParamDecl *Decl,
+ SourceLocation ProtocolLAngleLoc,
+ ArrayRef<ObjCProtocolDecl *> Protocols,
+ ArrayRef<SourceLocation> ProtocolLocs,
+ SourceLocation ProtocolRAngleLoc) {
+ return SemaRef.BuildObjCTypeParamType(Decl,
+ ProtocolLAngleLoc, Protocols,
+ ProtocolLocs, ProtocolRAngleLoc,
+ /*FailOnError=*/true);
+}
+
+template<typename Derived>
QualType TreeTransform<Derived>::RebuildObjCObjectType(
QualType BaseType,
SourceLocation Loc,
@@ -11626,21 +11872,48 @@ QualType TreeTransform<Derived>::RebuildFunctionNoProtoType(QualType T) {
}
template<typename Derived>
-QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(Decl *D) {
+QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
+ Decl *D) {
assert(D && "no decl found");
if (D->isInvalidDecl()) return QualType();
// FIXME: Doesn't account for ObjCInterfaceDecl!
TypeDecl *Ty;
- if (isa<UsingDecl>(D)) {
- UsingDecl *Using = cast<UsingDecl>(D);
+ if (auto *UPD = dyn_cast<UsingPackDecl>(D)) {
+ // A valid resolved using typename pack expansion decl can have multiple
+ // UsingDecls, but they must each have exactly one type, and it must be
+ // the same type in every case. But we must have at least one expansion!
+ if (UPD->expansions().empty()) {
+ getSema().Diag(Loc, diag::err_using_pack_expansion_empty)
+ << UPD->isCXXClassMember() << UPD;
+ return QualType();
+ }
+
+ // We might still have some unresolved types. Try to pick a resolved type
+ // if we can. The final instantiation will check that the remaining
+ // unresolved types instantiate to the type we pick.
+ QualType FallbackT;
+ QualType T;
+ for (auto *E : UPD->expansions()) {
+ QualType ThisT = RebuildUnresolvedUsingType(Loc, E);
+ if (ThisT.isNull())
+ continue;
+ else if (ThisT->getAs<UnresolvedUsingType>())
+ FallbackT = ThisT;
+ else if (T.isNull())
+ T = ThisT;
+ else
+ assert(getSema().Context.hasSameType(ThisT, T) &&
+ "mismatched resolved types in using pack expansion");
+ }
+ return T.isNull() ? FallbackT : T;
+ } else if (auto *Using = dyn_cast<UsingDecl>(D)) {
assert(Using->hasTypename() &&
"UnresolvedUsingTypenameDecl transformed to non-typename using");
// A valid resolved using typename decl points to exactly one type decl.
assert(++Using->shadow_begin() == Using->shadow_end());
Ty = cast<TypeDecl>((*Using->shadow_begin())->getTargetDecl());
-
} else {
assert(isa<UnresolvedUsingTypenameDecl>(D) &&
"UnresolvedUsingTypenameDecl transformed to non-using decl");
@@ -11690,8 +11963,10 @@ QualType TreeTransform<Derived>::RebuildAtomicType(QualType ValueType,
template<typename Derived>
QualType TreeTransform<Derived>::RebuildPipeType(QualType ValueType,
- SourceLocation KWLoc) {
- return SemaRef.BuildPipeType(ValueType, KWLoc);
+ SourceLocation KWLoc,
+ bool isReadPipe) {
+ return isReadPipe ? SemaRef.BuildReadPipeType(ValueType, KWLoc)
+ : SemaRef.BuildWritePipeType(ValueType, KWLoc);
}
template<typename Derived>
diff --git a/lib/Sema/TypeLocBuilder.h b/lib/Sema/TypeLocBuilder.h
index 382821859768..9c77045d2e12 100644
--- a/lib/Sema/TypeLocBuilder.h
+++ b/lib/Sema/TypeLocBuilder.h
@@ -39,7 +39,7 @@ class TypeLocBuilder {
#endif
/// The inline buffer.
- enum { BufferMaxAlignment = llvm::AlignOf<void*>::Alignment };
+ enum { BufferMaxAlignment = alignof(void *) };
llvm::AlignedCharArray<BufferMaxAlignment, InlineCapacity> InlineBuffer;
unsigned NumBytesAtAlign4, NumBytesAtAlign8;
diff --git a/lib/Serialization/ASTCommon.cpp b/lib/Serialization/ASTCommon.cpp
index 22ead2b57c72..ecd249cc5025 100644
--- a/lib/Serialization/ASTCommon.cpp
+++ b/lib/Serialization/ASTCommon.cpp
@@ -183,6 +183,7 @@ serialization::getDefinitiveDeclContext(const DeclContext *DC) {
case Decl::ExternCContext:
case Decl::Namespace:
case Decl::LinkageSpec:
+ case Decl::Export:
return nullptr;
// C/C++ tag types can only be defined in one place.
@@ -284,6 +285,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::NonTypeTemplateParm:
case Decl::TemplateTemplateParm:
case Decl::Using:
+ case Decl::UsingPack:
case Decl::ObjCMethod:
case Decl::ObjCCategory:
case Decl::ObjCCategoryImpl:
@@ -291,6 +293,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::ObjCProperty:
case Decl::ObjCCompatibleAlias:
case Decl::LinkageSpec:
+ case Decl::Export:
case Decl::ObjCPropertyImpl:
case Decl::PragmaComment:
case Decl::PragmaDetectMismatch:
@@ -307,6 +310,8 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::OMPCapturedExpr:
case Decl::OMPDeclareReduction:
case Decl::BuiltinTemplate:
+ case Decl::Decomposition:
+ case Decl::Binding:
return false;
// These indirectly derive from Redeclarable<T> but are not actually
diff --git a/lib/Serialization/ASTCommon.h b/lib/Serialization/ASTCommon.h
index 641165e4178f..cbc5f04738b1 100644
--- a/lib/Serialization/ASTCommon.h
+++ b/lib/Serialization/ASTCommon.h
@@ -30,6 +30,7 @@ enum DeclUpdateKind {
UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER,
UPD_CXX_INSTANTIATED_CLASS_DEFINITION,
UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT,
+ UPD_CXX_INSTANTIATED_DEFAULT_MEMBER_INITIALIZER,
UPD_CXX_RESOLVED_DTOR_DELETE,
UPD_CXX_RESOLVED_EXCEPTION_SPEC,
UPD_CXX_DEDUCED_RETURN_TYPE,
diff --git a/lib/Serialization/ASTReader.cpp b/lib/Serialization/ASTReader.cpp
index 9d1554a826aa..fe2c53b77e1d 100644
--- a/lib/Serialization/ASTReader.cpp
+++ b/lib/Serialization/ASTReader.cpp
@@ -1,4 +1,4 @@
-//===-- ASTReader.cpp - AST File Reader ----------------------------------===//
+//===-- ASTReader.cpp - AST File Reader -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -16,39 +16,62 @@
#include "ASTReaderInternals.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/ASTUnresolvedSet.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/Frontend/PCHContainerOperations.h"
-#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/RawCommentList.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/Basic/CommentOptions.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/ObjCRuntime.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Basic/Version.h"
#include "clang/Basic/VersionTuple.h"
-#include "clang/Frontend/Utils.h"
+#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/Weak.h"
#include "clang/Serialization/ASTDeserializationListener.h"
#include "clang/Serialization/GlobalModuleIndex.h"
#include "clang/Serialization/ModuleManager.h"
#include "clang/Serialization/SerializationDiagnostic.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Bitcode/BitstreamReader.h"
#include "llvm/Support/Compression.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -56,16 +79,27 @@
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <cassert>
+#include <cstdint>
#include <cstdio>
+#include <cstring>
+#include <ctime>
#include <iterator>
+#include <limits>
+#include <map>
+#include <memory>
+#include <new>
+#include <string>
#include <system_error>
+#include <tuple>
+#include <utility>
+#include <vector>
using namespace clang;
using namespace clang::serialization;
using namespace clang::serialization::reader;
using llvm::BitstreamCursor;
-
//===----------------------------------------------------------------------===//
// ChainedASTReaderListener implementation
//===----------------------------------------------------------------------===//
@@ -75,14 +109,17 @@ ChainedASTReaderListener::ReadFullVersionInformation(StringRef FullVersion) {
return First->ReadFullVersionInformation(FullVersion) ||
Second->ReadFullVersionInformation(FullVersion);
}
+
void ChainedASTReaderListener::ReadModuleName(StringRef ModuleName) {
First->ReadModuleName(ModuleName);
Second->ReadModuleName(ModuleName);
}
+
void ChainedASTReaderListener::ReadModuleMapFile(StringRef ModuleMapPath) {
First->ReadModuleMapFile(ModuleMapPath);
Second->ReadModuleMapFile(ModuleMapPath);
}
+
bool
ChainedASTReaderListener::ReadLanguageOptions(const LangOptions &LangOpts,
bool Complain,
@@ -92,6 +129,7 @@ ChainedASTReaderListener::ReadLanguageOptions(const LangOptions &LangOpts,
Second->ReadLanguageOptions(LangOpts, Complain,
AllowCompatibleDifferences);
}
+
bool ChainedASTReaderListener::ReadTargetOptions(
const TargetOptions &TargetOpts, bool Complain,
bool AllowCompatibleDifferences) {
@@ -100,11 +138,13 @@ bool ChainedASTReaderListener::ReadTargetOptions(
Second->ReadTargetOptions(TargetOpts, Complain,
AllowCompatibleDifferences);
}
+
bool ChainedASTReaderListener::ReadDiagnosticOptions(
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts, bool Complain) {
return First->ReadDiagnosticOptions(DiagOpts, Complain) ||
Second->ReadDiagnosticOptions(DiagOpts, Complain);
}
+
bool
ChainedASTReaderListener::ReadFileSystemOptions(const FileSystemOptions &FSOpts,
bool Complain) {
@@ -120,6 +160,7 @@ bool ChainedASTReaderListener::ReadHeaderSearchOptions(
Second->ReadHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
Complain);
}
+
bool ChainedASTReaderListener::ReadPreprocessorOptions(
const PreprocessorOptions &PPOpts, bool Complain,
std::string &SuggestedPredefines) {
@@ -145,6 +186,7 @@ void ChainedASTReaderListener::visitModuleFile(StringRef Filename,
First->visitModuleFile(Filename, Kind);
Second->visitModuleFile(Filename, Kind);
}
+
bool ChainedASTReaderListener::visitInputFile(StringRef Filename,
bool isSystem,
bool isOverridden,
@@ -336,11 +378,13 @@ bool PCHValidator::ReadTargetOptions(const TargetOptions &TargetOpts,
}
namespace {
+
typedef llvm::StringMap<std::pair<StringRef, bool /*IsUndef*/> >
MacroDefinitionsMap;
typedef llvm::DenseMap<DeclarationName, SmallVector<NamedDecl *, 8> >
DeclsMap;
-}
+
+} // end anonymous namespace
static bool checkDiagnosticGroupMappings(DiagnosticsEngine &StoredDiags,
DiagnosticsEngine &Diags,
@@ -496,12 +540,16 @@ collectMacroDefinitions(const PreprocessorOptions &PPOpts,
/// against the preprocessor options in an existing preprocessor.
///
/// \param Diags If non-null, produce diagnostics for any mismatches incurred.
+/// \param Validate If true, validate preprocessor options. If false, allow
+/// macros defined by \p ExistingPPOpts to override those defined by
+/// \p PPOpts in SuggestedPredefines.
static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
const PreprocessorOptions &ExistingPPOpts,
DiagnosticsEngine *Diags,
FileManager &FileMgr,
std::string &SuggestedPredefines,
- const LangOptions &LangOpts) {
+ const LangOptions &LangOpts,
+ bool Validate = true) {
// Check macro definitions.
MacroDefinitionsMap ASTFileMacros;
collectMacroDefinitions(PPOpts, ASTFileMacros);
@@ -517,7 +565,7 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
// Check whether we know anything about this macro name or not.
llvm::StringMap<std::pair<StringRef, bool /*IsUndef*/> >::iterator Known
= ASTFileMacros.find(MacroName);
- if (Known == ASTFileMacros.end()) {
+ if (!Validate || Known == ASTFileMacros.end()) {
// FIXME: Check whether this identifier was referenced anywhere in the
// AST file. If so, we should reject the AST file. Unfortunately, this
// information isn't in the control block. What shall we do about it?
@@ -560,7 +608,7 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
}
// Check whether we're using predefines.
- if (PPOpts.UsePredefines != ExistingPPOpts.UsePredefines) {
+ if (PPOpts.UsePredefines != ExistingPPOpts.UsePredefines && Validate) {
if (Diags) {
Diags->Report(diag::err_pch_undef) << ExistingPPOpts.UsePredefines;
}
@@ -569,7 +617,7 @@ static bool checkPreprocessorOptions(const PreprocessorOptions &PPOpts,
// Detailed record is important since it is used for the module cache hash.
if (LangOpts.Modules &&
- PPOpts.DetailedRecord != ExistingPPOpts.DetailedRecord) {
+ PPOpts.DetailedRecord != ExistingPPOpts.DetailedRecord && Validate) {
if (Diags) {
Diags->Report(diag::err_pch_pp_detailed_record) << PPOpts.DetailedRecord;
}
@@ -618,6 +666,19 @@ bool PCHValidator::ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
PP.getLangOpts());
}
+bool SimpleASTReaderListener::ReadPreprocessorOptions(
+ const PreprocessorOptions &PPOpts,
+ bool Complain,
+ std::string &SuggestedPredefines) {
+ return checkPreprocessorOptions(PPOpts,
+ PP.getPreprocessorOpts(),
+ nullptr,
+ PP.getFileManager(),
+ SuggestedPredefines,
+ PP.getLangOpts(),
+ false);
+}
+
/// Check the header search options deserialized from the control block
/// against the header search options in an existing preprocessor.
///
@@ -662,13 +723,10 @@ void ASTReader::setDeserializationListener(ASTDeserializationListener *Listener,
OwnsDeserializationListener = TakeOwnership;
}
-
-
unsigned ASTSelectorLookupTrait::ComputeHash(Selector Sel) {
return serialization::ComputeHash(Sel);
}
-
std::pair<unsigned, unsigned>
ASTSelectorLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
using namespace llvm::support;
@@ -677,7 +735,7 @@ ASTSelectorLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
return std::make_pair(KeyLen, DataLen);
}
-ASTSelectorLookupTrait::internal_key_type
+ASTSelectorLookupTrait::internal_key_type
ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
using namespace llvm::support;
SelectorTable &SelTable = Reader.getContext().Selectors;
@@ -698,8 +756,8 @@ ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
return SelTable.getSelector(N, Args.data());
}
-ASTSelectorLookupTrait::data_type
-ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
+ASTSelectorLookupTrait::data_type
+ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
unsigned DataLen) {
using namespace llvm::support;
@@ -1138,7 +1196,7 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
RecordData Record;
while (true) {
llvm::BitstreamEntry E = SLocEntryCursor.advanceSkippingSubblocks();
-
+
switch (E.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
case llvm::BitstreamEntry::Error:
@@ -1150,7 +1208,7 @@ bool ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
// The interesting case.
break;
}
-
+
// Read a record.
Record.clear();
StringRef Blob;
@@ -1246,7 +1304,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
Error("incorrectly-formatted source location entry in AST file");
return true;
}
-
+
RecordData Record;
StringRef Blob;
switch (SLocEntryCursor.readRecord(Entry.ID, Record, &Blob)) {
@@ -1312,8 +1370,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
SrcMgr::CharacteristicKind
FileCharacter = (SrcMgr::CharacteristicKind)Record[2];
SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
- if (IncludeLoc.isInvalid() &&
- (F->Kind == MK_ImplicitModule || F->Kind == MK_ExplicitModule)) {
+ if (IncludeLoc.isInvalid() && F->isModule()) {
IncludeLoc = getImportLocation(F);
}
@@ -1351,7 +1408,7 @@ std::pair<SourceLocation, StringRef> ASTReader::getModuleImportLoc(int ID) {
// Find which module file this entry lands in.
ModuleFile *M = GlobalSLocEntryMap.find(-ID)->second;
- if (M->Kind != MK_ImplicitModule && M->Kind != MK_ExplicitModule)
+ if (!M->isModule())
return std::make_pair(SourceLocation(), "");
// FIXME: Can we map this down to a particular submodule? That would be
@@ -1363,7 +1420,7 @@ std::pair<SourceLocation, StringRef> ASTReader::getModuleImportLoc(int ID) {
SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
if (F->ImportLoc.isValid())
return F->ImportLoc;
-
+
// Otherwise we have a PCH. It's considered to be "imported" at the first
// location of its includer.
if (F->ImportedBy.empty() || !F->ImportedBy[0]) {
@@ -1425,7 +1482,7 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
// be able to reseek within the block and read entries.
unsigned Flags = BitstreamCursor::AF_DontPopBlockAtEnd;
llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks(Flags);
-
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
case llvm::BitstreamEntry::Error:
@@ -1517,13 +1574,13 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
}
}
-PreprocessedEntityID
+PreprocessedEntityID
ASTReader::getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const {
- ContinuousRangeMap<uint32_t, int, 2>::const_iterator
+ ContinuousRangeMap<uint32_t, int, 2>::const_iterator
I = M.PreprocessedEntityRemap.find(LocalID - NUM_PREDEF_PP_ENTITY_IDS);
- assert(I != M.PreprocessedEntityRemap.end()
+ assert(I != M.PreprocessedEntityRemap.end()
&& "Invalid index into preprocessed entity index remap");
-
+
return LocalID + I->second;
}
@@ -1531,22 +1588,21 @@ unsigned HeaderFileInfoTrait::ComputeHash(internal_key_ref ikey) {
return llvm::hash_combine(ikey.Size, ikey.ModTime);
}
-HeaderFileInfoTrait::internal_key_type
+HeaderFileInfoTrait::internal_key_type
HeaderFileInfoTrait::GetInternalKey(const FileEntry *FE) {
internal_key_type ikey = {FE->getSize(),
M.HasTimestamps ? FE->getModificationTime() : 0,
FE->getName(), /*Imported*/ false};
return ikey;
}
-
+
bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
if (a.Size != b.Size || (a.ModTime && b.ModTime && a.ModTime != b.ModTime))
return false;
- if (llvm::sys::path::is_absolute(a.Filename) &&
- strcmp(a.Filename, b.Filename) == 0)
+ if (llvm::sys::path::is_absolute(a.Filename) && a.Filename == b.Filename)
return true;
-
+
// Determine whether the actual files are equivalent.
FileManager &FileMgr = Reader.getFileManager();
auto GetFile = [&](const internal_key_type &Key) -> const FileEntry* {
@@ -1562,7 +1618,7 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
const FileEntry *FEB = GetFile(b);
return FEA && FEA == FEB;
}
-
+
std::pair<unsigned, unsigned>
HeaderFileInfoTrait::ReadKeyDataLength(const unsigned char*& d) {
using namespace llvm::support;
@@ -1582,7 +1638,7 @@ HeaderFileInfoTrait::ReadKey(const unsigned char *d, unsigned) {
return ikey;
}
-HeaderFileInfoTrait::data_type
+HeaderFileInfoTrait::data_type
HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
unsigned DataLen) {
const unsigned char *End = d + DataLen;
@@ -1602,7 +1658,7 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
M, endian::readNext<uint32_t, little, unaligned>(d));
if (unsigned FrameworkOffset =
endian::readNext<uint32_t, little, unaligned>(d)) {
- // The framework offset is 1 greater than the actual offset,
+ // The framework offset is 1 greater than the actual offset,
// since 0 is used as an indicator for "no framework name".
StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
HFI.Framework = HS->getUniqueFrameworkName(FrameworkName);
@@ -1655,7 +1711,7 @@ void ASTReader::ReadDefinedMacros() {
BitstreamCursor &MacroCursor = I->MacroCursor;
// If there was no preprocessor block, skip this file.
- if (!MacroCursor.getBitStreamReader())
+ if (MacroCursor.getBitcodeBytes().empty())
continue;
BitstreamCursor Cursor = MacroCursor;
@@ -1664,7 +1720,7 @@ void ASTReader::ReadDefinedMacros() {
RecordData Record;
while (true) {
llvm::BitstreamEntry E = Cursor.advanceSkippingSubblocks();
-
+
switch (E.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
case llvm::BitstreamEntry::Error:
@@ -1672,13 +1728,13 @@ void ASTReader::ReadDefinedMacros() {
return;
case llvm::BitstreamEntry::EndBlock:
goto NextCursor;
-
+
case llvm::BitstreamEntry::Record:
Record.clear();
switch (Cursor.readRecord(E.ID, Record)) {
default: // Default behavior: ignore.
break;
-
+
case PP_MACRO_OBJECT_LIKE:
case PP_MACRO_FUNCTION_LIKE: {
IdentifierInfo *II = getLocalIdentifier(*I, Record[0]);
@@ -1686,7 +1742,7 @@ void ASTReader::ReadDefinedMacros() {
updateOutOfDateIdentifier(*II);
break;
}
-
+
case PP_TOKEN:
// Ignore tokens.
break;
@@ -1699,6 +1755,7 @@ void ASTReader::ReadDefinedMacros() {
}
namespace {
+
  /// \brief Visitor class used to look up identifiers in an AST file.
class IdentifierLookupVisitor {
StringRef Name;
@@ -1737,7 +1794,7 @@ namespace {
IdTable->find_hashed(Name, NameHash, &Trait);
if (Pos == IdTable->end())
return false;
-
+
// Dereferencing the iterator has the effect of building the
// IdentifierInfo node and populating it with the various
// declarations it needs.
@@ -1745,12 +1802,13 @@ namespace {
Found = *Pos;
return true;
}
-
+
// \brief Retrieve the identifier info found within the module
// files.
IdentifierInfo *getIdentifierInfo() const { return Found; }
};
-}
+
+} // end anonymous namespace
void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
// Note that we are loading an identifier.
@@ -1780,7 +1838,7 @@ void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) {
if (!II)
return;
-
+
II->setOutOfDate(false);
// Update the generation for this identifier.
@@ -1861,7 +1919,7 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
// Don't read the directive history for a module; we don't have anywhere
// to put it.
- if (M.Kind == MK_ImplicitModule || M.Kind == MK_ExplicitModule)
+ if (M.isModule())
return;
// Deserialize the macro directives history in reverse source-order.
@@ -1895,7 +1953,7 @@ void ASTReader::resolvePendingMacro(IdentifierInfo *II,
}
if (Latest)
- PP.setLoadedMacroDirective(II, Latest);
+ PP.setLoadedMacroDirective(II, Earliest, Latest);
}
ASTReader::InputFileInfo
@@ -1925,6 +1983,7 @@ ASTReader::readInputFileInfo(ModuleFile &F, unsigned ID) {
return R;
}
+static unsigned moduleKindForDiagnostic(ModuleKind Kind);
InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// If this ID is bogus, just return an empty input file.
if (ID == 0 || ID > F.InputFilesLoaded.size())
@@ -1941,7 +2000,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
BitstreamCursor &Cursor = F.InputFilesCursor;
SavedStreamPosition SavedPosition(Cursor);
Cursor.JumpToBit(F.InputFileOffsets[ID-1]);
-
+
InputFileInfo FI = readInputFileInfo(F, ID);
off_t StoredSize = FI.StoredSize;
time_t StoredTime = FI.StoredTime;
@@ -1974,7 +2033,7 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
ErrorStr += "' referenced by AST file '";
ErrorStr += F.FileName;
ErrorStr += "'";
- Error(ErrorStr.c_str());
+ Error(ErrorStr);
}
// Record that we didn't find the file.
F.InputFilesLoaded[ID-1] = InputFile::getNotFound();
@@ -2021,7 +2080,13 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
// The top-level PCH is stale.
StringRef TopLevelPCHName(ImportStack.back()->FileName);
- Error(diag::err_fe_pch_file_modified, Filename, TopLevelPCHName);
+ unsigned DiagnosticKind = moduleKindForDiagnostic(ImportStack.back()->Kind);
+ if (DiagnosticKind == 0)
+ Error(diag::err_fe_pch_file_modified, Filename, TopLevelPCHName);
+ else if (DiagnosticKind == 1)
+ Error(diag::err_fe_module_file_modified, Filename, TopLevelPCHName);
+ else
+ Error(diag::err_fe_ast_file_modified, Filename, TopLevelPCHName);
// Print the import stack.
if (ImportStack.size() > 1 && !Diags.isDiagnosticInFlight()) {
@@ -2084,16 +2149,16 @@ static bool isDiagnosedResult(ASTReader::ASTReadResult ARR, unsigned Caps) {
ASTReader::ASTReadResult ASTReader::ReadOptionsBlock(
BitstreamCursor &Stream, unsigned ClientLoadCapabilities,
bool AllowCompatibleConfigurationMismatch, ASTReaderListener &Listener,
- std::string &SuggestedPredefines) {
+ std::string &SuggestedPredefines, bool ValidateDiagnosticOptions) {
if (Stream.EnterSubBlock(OPTIONS_BLOCK_ID))
return Failure;
// Read all of the records in the options block.
RecordData Record;
ASTReadResult Result = Success;
- while (1) {
+ while (true) {
llvm::BitstreamEntry Entry = Stream.advance();
-
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
case llvm::BitstreamEntry::SubBlock:
@@ -2128,7 +2193,8 @@ ASTReader::ASTReadResult ASTReader::ReadOptionsBlock(
case DIAGNOSTIC_OPTIONS: {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
- if (!AllowCompatibleConfigurationMismatch &&
+ if (ValidateDiagnosticOptions &&
+ !AllowCompatibleConfigurationMismatch &&
ParseDiagnosticOptions(Record, Complain, Listener))
return OutOfDate;
break;
@@ -2178,9 +2244,9 @@ ASTReader::ReadControlBlock(ModuleFile &F,
RecordData Record;
unsigned NumInputs = 0;
unsigned NumUserInputs = 0;
- while (1) {
+ while (true) {
llvm::BitstreamEntry Entry = Stream.advance();
-
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
Error("malformed block record in AST file");
@@ -2193,7 +2259,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
// All user input files reside at the index range [0, NumUserInputs), and
// system input files reside at [NumUserInputs, NumInputs). For explicitly
// loaded module files, ignore missing inputs.
- if (!DisableValidation && F.Kind != MK_ExplicitModule) {
+ if (!DisableValidation && F.Kind != MK_ExplicitModule &&
+ F.Kind != MK_PrebuiltModule) {
bool Complain = (ClientLoadCapabilities & ARR_OutOfDate) == 0;
// If we are reading a module, we will create a verification timestamp,
@@ -2224,7 +2291,8 @@ ASTReader::ReadControlBlock(ModuleFile &F,
bool IsSystem = I >= NumUserInputs;
InputFileInfo FI = readInputFileInfo(F, I+1);
Listener->visitInputFile(FI.Filename, IsSystem, FI.Overridden,
- F.Kind == MK_ExplicitModule);
+ F.Kind == MK_ExplicitModule ||
+ F.Kind == MK_PrebuiltModule);
}
}
@@ -2254,11 +2322,14 @@ ASTReader::ReadControlBlock(ModuleFile &F,
//
// FIXME: Allow this for files explicitly specified with -include-pch.
bool AllowCompatibleConfigurationMismatch =
- F.Kind == MK_ExplicitModule;
+ F.Kind == MK_ExplicitModule || F.Kind == MK_PrebuiltModule;
+ const HeaderSearchOptions &HSOpts =
+ PP.getHeaderSearchInfo().getHeaderSearchOpts();
Result = ReadOptionsBlock(Stream, ClientLoadCapabilities,
AllowCompatibleConfigurationMismatch,
- *Listener, SuggestedPredefines);
+ *Listener, SuggestedPredefines,
+ HSOpts.ModulesValidateDiagnosticOptions);
if (Result == Failure) {
Error("malformed block record in AST file");
return Result;
@@ -2278,7 +2349,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
return Failure;
}
continue;
-
+
default:
if (Stream.SkipBlock()) {
Error("malformed block record in AST file");
@@ -2286,7 +2357,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
continue;
}
-
+
case llvm::BitstreamEntry::Record:
// The interesting case.
break;
@@ -2338,7 +2409,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
break;
case IMPORTS: {
- // Load each of the imported PCH files.
+ // Load each of the imported PCH files.
unsigned Idx = 0, N = Record.size();
while (Idx < N) {
// Read information about the AST file.
@@ -2413,7 +2484,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
if (M && M->Directory) {
// If we're implicitly loading a module, the base directory can't
// change between the build and use.
- if (F.Kind != MK_ExplicitModule) {
+ if (F.Kind != MK_ExplicitModule && F.Kind != MK_PrebuiltModule) {
const DirectoryEntry *BuildDir =
PP.getFileManager().getDirectory(Blob);
if (!BuildDir || BuildDir != M->Directory) {
@@ -2458,9 +2529,9 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Read all of the records and blocks for the AST file.
RecordData Record;
- while (1) {
+ while (true) {
llvm::BitstreamEntry Entry = Stream.advance();
-
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
Error("error at end of module block in AST file");
@@ -2475,7 +2546,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
if (DC->hasExternalLexicalStorage() &&
!getContext().getLangOpts().CPlusPlus)
DC->setMustBuildLookupTable();
-
+
return Success;
}
case llvm::BitstreamEntry::SubBlock:
@@ -2498,7 +2569,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.MacroCursor = Stream;
if (!PP.getExternalSource())
PP.setExternalSource(this);
-
+
if (Stream.SkipBlock() ||
ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
Error("malformed block record in AST file");
@@ -2506,7 +2577,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo();
break;
-
+
case PREPROCESSOR_DETAIL_BLOCK_ID:
F.PreprocessorDetailCursor = Stream;
if (Stream.SkipBlock() ||
@@ -2517,23 +2588,23 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
F.PreprocessorDetailStartOffset
= F.PreprocessorDetailCursor.GetCurrentBitNo();
-
+
if (!PP.getPreprocessingRecord())
PP.createPreprocessingRecord();
if (!PP.getPreprocessingRecord()->getExternalSource())
PP.getPreprocessingRecord()->SetExternalSource(*this);
break;
-
+
case SOURCE_MANAGER_BLOCK_ID:
if (ReadSourceManagerBlock(F))
return Failure;
break;
-
+
case SUBMODULE_BLOCK_ID:
if (ASTReadResult Result = ReadSubmoduleBlock(F, ClientLoadCapabilities))
return Result;
break;
-
+
case COMMENTS_BLOCK_ID: {
BitstreamCursor C = Stream;
if (Stream.SkipBlock() ||
@@ -2544,7 +2615,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
CommentsCursors.push_back(std::make_pair(C, &F));
break;
}
-
+
default:
if (Stream.SkipBlock()) {
Error("malformed block record in AST file");
@@ -2553,7 +2624,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
continue;
-
+
case llvm::BitstreamEntry::Record:
// The interesting case.
break;
@@ -2575,21 +2646,21 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.LocalNumTypes = Record[0];
unsigned LocalBaseTypeIndex = Record[1];
F.BaseTypeIndex = getTotalNumTypes();
-
+
if (F.LocalNumTypes > 0) {
// Introduce the global -> local mapping for types within this module.
GlobalTypeMap.insert(std::make_pair(getTotalNumTypes(), &F));
-
+
// Introduce the local -> global mapping for types within this module.
F.TypeRemap.insertOrReplace(
- std::make_pair(LocalBaseTypeIndex,
+ std::make_pair(LocalBaseTypeIndex,
F.BaseTypeIndex - LocalBaseTypeIndex));
TypesLoaded.resize(TypesLoaded.size() + F.LocalNumTypes);
}
break;
}
-
+
case DECL_OFFSET: {
if (F.LocalNumDecls != 0) {
Error("duplicate DECL_OFFSET record in AST file");
@@ -2599,18 +2670,18 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.LocalNumDecls = Record[0];
unsigned LocalBaseDeclID = Record[1];
F.BaseDeclID = getTotalNumDecls();
-
+
if (F.LocalNumDecls > 0) {
- // Introduce the global -> local mapping for declarations within this
+ // Introduce the global -> local mapping for declarations within this
// module.
GlobalDeclMap.insert(
std::make_pair(getTotalNumDecls() + NUM_PREDEF_DECL_IDS, &F));
-
+
// Introduce the local -> global mapping for declarations within this
// module.
F.DeclRemap.insertOrReplace(
std::make_pair(LocalBaseDeclID, F.BaseDeclID - LocalBaseDeclID));
-
+
// Introduce the global -> local mapping for declarations within this
// module.
F.GlobalToLocalDeclIDs[&F] = LocalBaseDeclID;
@@ -2619,7 +2690,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
}
-
+
case TU_UPDATE_LEXICAL: {
DeclContext *TU = Context.getTranslationUnitDecl();
LexicalContents Contents(
@@ -2651,7 +2722,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
(const unsigned char *)F.IdentifierTableData + sizeof(uint32_t),
(const unsigned char *)F.IdentifierTableData,
ASTIdentifierLookupTrait(*this, F));
-
+
PP.getIdentifierTable().setExternalIdentifierLookup(this);
}
break;
@@ -2665,13 +2736,13 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.LocalNumIdentifiers = Record[0];
unsigned LocalBaseIdentifierID = Record[1];
F.BaseIdentifierID = getTotalNumIdentifiers();
-
+
if (F.LocalNumIdentifiers > 0) {
// Introduce the global -> local mapping for identifiers within this
// module.
- GlobalIdentifierMap.insert(std::make_pair(getTotalNumIdentifiers() + 1,
+ GlobalIdentifierMap.insert(std::make_pair(getTotalNumIdentifiers() + 1,
&F));
-
+
// Introduce the local -> global mapping for identifiers within this
// module.
F.IdentifierRemap.insertOrReplace(
@@ -2738,11 +2809,11 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error("invalid weak identifiers record");
return Failure;
}
-
- // FIXME: Ignore weak undeclared identifiers from non-original PCH
+
+ // FIXME: Ignore weak undeclared identifiers from non-original PCH
// files. This isn't the way to do it :)
WeakUndeclaredIdentifiers.clear();
-
+
// Translate the weak, undeclared identifiers into global IDs.
for (unsigned I = 0, N = Record.size(); I < N; /* in loop */) {
WeakUndeclaredIdentifiers.push_back(
@@ -2760,13 +2831,13 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.LocalNumSelectors = Record[0];
unsigned LocalBaseSelectorID = Record[1];
F.BaseSelectorID = getTotalNumSelectors();
-
+
if (F.LocalNumSelectors > 0) {
- // Introduce the global -> local mapping for selectors within this
+ // Introduce the global -> local mapping for selectors within this
// module.
GlobalSelectorMap.insert(std::make_pair(getTotalNumSelectors()+1, &F));
-
- // Introduce the local -> global mapping for selectors within this
+
+ // Introduce the local -> global mapping for selectors within this
// module.
F.SelectorRemap.insertOrReplace(
std::make_pair(LocalBaseSelectorID,
@@ -2776,7 +2847,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
}
-
+
case METHOD_POOL:
F.SelectorLookupTableData = (const unsigned char *)Blob.data();
if (Record[0])
@@ -2791,7 +2862,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case REFERENCED_SELECTOR_POOL:
if (!Record.empty()) {
for (unsigned Idx = 0, N = Record.size() - 1; Idx < N; /* in loop */) {
- ReferencedSelectorsData.push_back(getGlobalSelectorID(F,
+ ReferencedSelectorsData.push_back(getGlobalSelectorID(F,
Record[Idx++]));
ReferencedSelectorsData.push_back(ReadSourceLocation(F, Record, Idx).
getRawEncoding());
@@ -2803,7 +2874,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
if (!Record.empty() && Listener)
Listener->ReadCounter(F, Record[0]);
break;
-
+
case FILE_SORTED_DECLS:
F.FileSortedDecls = (const DeclID *)Blob.data();
F.NumFileSortedDecls = Record[0];
@@ -2840,7 +2911,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// This module. Base was 2 when being compiled.
F.SLocRemap.insertOrReplace(std::make_pair(2U,
static_cast<int>(F.SLocEntryBaseOffset - 2)));
-
+
TotalNumSLocEntries += F.LocalNumSLocEntries;
break;
}
@@ -2932,7 +3003,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error("Multiple SOURCE_LOCATION_PRELOADS records in AST file");
return Failure;
}
-
+
F.PreloadSLocEntries.swap(Record);
break;
}
@@ -2947,12 +3018,12 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error("Invalid VTABLE_USES record");
return Failure;
}
-
+
// Later tables overwrite earlier ones.
// FIXME: Modules will have some trouble with this. This is clearly not
// the right way to do this.
VTableUses.clear();
-
+
for (unsigned Idx = 0, N = Record.size(); Idx != N; /* In loop */) {
VTableUses.push_back(getGlobalDeclID(F, Record[Idx++]));
VTableUses.push_back(
@@ -2980,7 +3051,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case SEMA_DECL_REFS:
- if (Record.size() != 2) {
+ if (Record.size() != 3) {
Error("Invalid SEMA_DECL_REFS block");
return Failure;
}
@@ -2994,13 +3065,13 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.NumPreprocessedEntities = Blob.size() / sizeof(PPEntityOffset);
unsigned LocalBasePreprocessedEntityID = Record[0];
-
+
unsigned StartingID;
if (!PP.getPreprocessingRecord())
PP.createPreprocessingRecord();
if (!PP.getPreprocessingRecord()->getExternalSource())
PP.getPreprocessingRecord()->SetExternalSource(*this);
- StartingID
+ StartingID
= PP.getPreprocessingRecord()
->allocateLoadedEntities(F.NumPreprocessedEntities);
F.BasePreprocessedEntityID = StartingID;
@@ -3009,7 +3080,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Introduce the global -> local mapping for preprocessed entities in
// this module.
GlobalPreprocessedEntityMap.insert(std::make_pair(StartingID, &F));
-
+
// Introduce the local -> global mapping for preprocessed entities in
// this module.
F.PreprocessedEntityRemap.insertOrReplace(
@@ -3019,7 +3090,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
}
-
+
case DECL_UPDATE_OFFSETS: {
if (Record.size() % 2 != 0) {
Error("invalid DECL_UPDATE_OFFSETS block in AST file");
@@ -3042,12 +3113,12 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Error("duplicate OBJC_CATEGORIES_MAP record in AST file");
return Failure;
}
-
+
F.LocalNumObjCCategoriesInMap = Record[0];
F.ObjCCategoriesMap = (const ObjCCategoriesInfo *)Blob.data();
break;
}
-
+
case OBJC_CATEGORIES:
F.ObjCCategories.swap(Record);
break;
@@ -3059,7 +3130,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.PragmaDiagMappings.insert(F.PragmaDiagMappings.end(),
Record.begin(), Record.end());
break;
-
+
case CUDA_SPECIAL_DECL_REFS:
// Later tables overwrite earlier ones.
// FIXME: Modules will have trouble with this.
@@ -3076,32 +3147,62 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
= HeaderFileInfoLookupTable::Create(
(const unsigned char *)F.HeaderFileInfoTableData + Record[0],
(const unsigned char *)F.HeaderFileInfoTableData,
- HeaderFileInfoTrait(*this, F,
+ HeaderFileInfoTrait(*this, F,
&PP.getHeaderSearchInfo(),
Blob.data() + Record[2]));
-
+
PP.getHeaderSearchInfo().SetExternalSource(this);
if (!PP.getHeaderSearchInfo().getExternalLookup())
PP.getHeaderSearchInfo().SetExternalLookup(this);
}
break;
}
-
+
case FP_PRAGMA_OPTIONS:
// Later tables overwrite earlier ones.
FPPragmaOptions.swap(Record);
break;
case OPENCL_EXTENSIONS:
- // Later tables overwrite earlier ones.
- OpenCLExtensions.swap(Record);
+ for (unsigned I = 0, E = Record.size(); I != E; ) {
+ auto Name = ReadString(Record, I);
+ auto &Opt = OpenCLExtensions.OptMap[Name];
+ Opt.Supported = Record[I++] != 0;
+ Opt.Enabled = Record[I++] != 0;
+ Opt.Avail = Record[I++];
+ Opt.Core = Record[I++];
+ }
+ break;
+
+ case OPENCL_EXTENSION_TYPES:
+ for (unsigned I = 0, E = Record.size(); I != E;) {
+ auto TypeID = static_cast<::TypeID>(Record[I++]);
+ auto *Type = GetType(TypeID).getTypePtr();
+ auto NumExt = static_cast<unsigned>(Record[I++]);
+ for (unsigned II = 0; II != NumExt; ++II) {
+ auto Ext = ReadString(Record, I);
+ OpenCLTypeExtMap[Type].insert(Ext);
+ }
+ }
+ break;
+
+ case OPENCL_EXTENSION_DECLS:
+ for (unsigned I = 0, E = Record.size(); I != E;) {
+ auto DeclID = static_cast<::DeclID>(Record[I++]);
+ auto *Decl = GetDecl(DeclID);
+ auto NumExt = static_cast<unsigned>(Record[I++]);
+ for (unsigned II = 0; II != NumExt; ++II) {
+ auto Ext = ReadString(Record, I);
+ OpenCLDeclExtMap[Decl].insert(Ext);
+ }
+ }
break;
case TENTATIVE_DEFINITIONS:
for (unsigned I = 0, N = Record.size(); I != N; ++I)
TentativeDefinitions.push_back(getGlobalDeclID(F, Record[I]));
break;
-
+
case KNOWN_NAMESPACES:
for (unsigned I = 0, N = Record.size(); I != N; ++I)
KnownNamespaces.push_back(getGlobalDeclID(F, Record[I]));
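The rewritten OPENCL_EXTENSIONS case above parses a structured payload instead of swapping in the raw record, and the two new cases attach extension names to types and declarations. A self-contained sketch of the extensions layout implied by the loop body (OpenCLOption, readString, and decodeExtensions are illustrative; readString assumes ASTReader::ReadString's length-prefixed encoding):

    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    struct OpenCLOption {
      bool Supported = false, Enabled = false;
      unsigned Avail = 0, Core = 0;
    };

    // Assumed string encoding: element count, then one element per char.
    static std::string readString(const std::vector<uint64_t> &Rec,
                                  unsigned &I) {
      std::string S;
      for (unsigned J = 0, Len = unsigned(Rec[I++]); J != Len; ++J)
        S += char(Rec[I++]);
      return S;
    }

    // Payload: repeated [name, Supported, Enabled, Avail, Core] tuples.
    static std::map<std::string, OpenCLOption>
    decodeExtensions(const std::vector<uint64_t> &Rec) {
      std::map<std::string, OpenCLOption> OptMap;
      for (unsigned I = 0, E = unsigned(Rec.size()); I != E;) {
        OpenCLOption &Opt = OptMap[readString(Rec, I)];
        Opt.Supported = Rec[I++] != 0;
        Opt.Enabled = Rec[I++] != 0;
        Opt.Avail = unsigned(Rec[I++]);
        Opt.Core = unsigned(Rec[I++]);
      }
      return OptMap;
    }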
@@ -3137,7 +3238,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
break;
case IMPORTED_MODULES: {
- if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule) {
+ if (!F.isModule()) {
// If we aren't loading a module (which has its own exports), make
// all of the imported modules visible.
// FIXME: Deal with macros-only imports.
@@ -3210,6 +3311,14 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
UnusedLocalTypedefNameCandidates.push_back(
getGlobalDeclID(F, Record[I]));
break;
+
+ case CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH:
+ if (Record.size() != 1) {
+ Error("invalid cuda pragma options record");
+ return Failure;
+ }
+ ForceCUDAHostDeviceDepth = Record[0];
+ break;
}
}
}
@@ -3221,7 +3330,7 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
unsigned Idx = 0;
F.ModuleMapPath = ReadPath(F, Record, Idx);
- if (F.Kind == MK_ExplicitModule) {
+ if (F.Kind == MK_ExplicitModule || F.Kind == MK_PrebuiltModule) {
// For an explicitly-loaded module, we don't care whether the original
// module map file exists or matches.
return Success;
@@ -3404,6 +3513,31 @@ void ASTReader::makeModuleVisible(Module *Mod,
}
}
+/// We've merged the definition \p MergedDef into the existing definition
+/// \p Def. Ensure that \p Def is made visible whenever \p MergedDef is made
+/// visible.
+void ASTReader::mergeDefinitionVisibility(NamedDecl *Def,
+ NamedDecl *MergedDef) {
+ // FIXME: This doesn't correctly handle the case where MergedDef is visible
+ // in modules other than its owning module. We should instead give the
+ // ASTContext a list of merged definitions for Def.
+ if (Def->isHidden()) {
+ // If MergedDef is visible or becomes visible, make the definition visible.
+ if (!MergedDef->isHidden())
+ Def->Hidden = false;
+ else if (getContext().getLangOpts().ModulesLocalVisibility) {
+ getContext().mergeDefinitionIntoModule(
+ Def, MergedDef->getImportedOwningModule(),
+ /*NotifyListeners*/ false);
+ PendingMergedDefinitionsToDeduplicate.insert(Def);
+ } else {
+ auto SubmoduleID = MergedDef->getOwningModuleID();
+ assert(SubmoduleID && "hidden definition in no module");
+ HiddenNamesMap[getSubmodule(SubmoduleID)].push_back(Def);
+ }
+ }
+}
+
bool ASTReader::loadGlobalIndex() {
if (GlobalIndex)
return false;
@@ -3411,7 +3545,7 @@ bool ASTReader::loadGlobalIndex() {
if (TriedLoadingGlobalIndex || !UseGlobalIndex ||
!Context.getLangOpts().Modules)
return true;
-
+
// Try to load the global index.
TriedLoadingGlobalIndex = true;
StringRef ModuleCachePath
@@ -3445,7 +3579,7 @@ static void updateModuleTimestamp(ModuleFile &MF) {
/// cursor into the start of the given block ID, returning false on success and
/// true on failure.
static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
- while (1) {
+ while (true) {
llvm::BitstreamEntry Entry = Cursor.advance();
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
@@ -3474,7 +3608,8 @@ static bool SkipCursorToBlock(BitstreamCursor &Cursor, unsigned BlockID) {
ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
ModuleKind Type,
SourceLocation ImportLoc,
- unsigned ClientLoadCapabilities) {
+ unsigned ClientLoadCapabilities,
+ SmallVectorImpl<ImportedSubmodule> *Imported) {
llvm::SaveAndRestore<SourceLocation>
SetCurImportLocRAII(CurrentImportLoc, ImportLoc);
@@ -3534,12 +3669,12 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
return Result;
}
- // Once read, set the ModuleFile bit base offset and update the size in
+ // Once read, set the ModuleFile bit base offset and update the size in
// bits of all files we've seen.
F.GlobalBitOffset = TotalModulesSizeInBits;
TotalModulesSizeInBits += F.SizeInBits;
GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F));
-
+
// Preload SLocEntries.
for (unsigned I = 0, N = F.PreloadSLocEntries.size(); I != N; ++I) {
int Index = int(F.PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID;
@@ -3592,7 +3727,8 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
}
if (!Context.getLangOpts().CPlusPlus ||
- (Type != MK_ImplicitModule && Type != MK_ExplicitModule)) {
+ (Type != MK_ImplicitModule && Type != MK_ExplicitModule &&
+ Type != MK_PrebuiltModule)) {
// Mark all of the identifiers in the identifier table as being out of date,
// so that various accessors know to check the loaded modules when the
// identifier is used.
@@ -3608,7 +3744,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
// Mark selectors as out of date.
for (auto Sel : SelectorGeneration)
SelectorOutOfDate[Sel.first] = true;
-
+
// Resolve any unresolved module exports.
for (unsigned I = 0, N = UnresolvedModuleRefs.size(); I != N; ++I) {
UnresolvedModuleRef &Unresolved = UnresolvedModuleRefs[I];
@@ -3639,10 +3775,14 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
}
UnresolvedModuleRefs.clear();
+ if (Imported)
+ Imported->append(ImportedModules.begin(),
+ ImportedModules.end());
+
// FIXME: How do we load the 'use'd modules? They may not be submodules.
// Might be unnecessary as use declarations are only used to build the
// module itself.
-
+
InitializeContext();
if (SemaObj)
@@ -3653,7 +3793,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
ModuleFile &PrimaryModule = ModuleMgr.getPrimaryModule();
if (PrimaryModule.OriginalSourceFileID.isValid()) {
- PrimaryModule.OriginalSourceFileID
+ PrimaryModule.OriginalSourceFileID
= FileID::get(PrimaryModule.SLocEntryBaseID
+ PrimaryModule.OriginalSourceFileID.getOpaqueValue() - 1);
@@ -3666,11 +3806,11 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
SourceMgr.setMainFileID(PrimaryModule.OriginalSourceFileID);
}
}
-
+
// For any Objective-C class definitions we have already loaded, make sure
// that we load any additional categories.
for (unsigned I = 0, N = ObjCClassesLoaded.size(); I != N; ++I) {
- loadObjCCategories(ObjCClassesLoaded[I]->getGlobalID(),
+ loadObjCCategories(ObjCClassesLoaded[I]->getGlobalID(),
ObjCClassesLoaded[I],
PreviousGeneration);
}
@@ -3693,11 +3833,12 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
return Success;
}
-static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile);
+static ASTFileSignature readASTFileSignature(StringRef PCH);
/// \brief Whether \p Stream starts with the AST/PCH file magic number 'CPCH'.
static bool startsWithASTFileMagic(BitstreamCursor &Stream) {
- return Stream.Read(8) == 'C' &&
+ return Stream.canSkipToPos(4) &&
+ Stream.Read(8) == 'C' &&
Stream.Read(8) == 'P' &&
Stream.Read(8) == 'C' &&
Stream.Read(8) == 'H';
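The added canSkipToPos(4) guard makes the magic-number sniff fail cleanly on a buffer shorter than four bytes instead of reading past its end. The equivalent check on a plain buffer (a sketch, not the bitstream API):

    #include <cstring>
    #include <string_view>

    static bool startsWithASTFileMagic(std::string_view Buffer) {
      // Bounds check first, then compare the 4-byte 'CPCH' magic.
      return Buffer.size() >= 4 && std::memcmp(, "CPCH", 4) == 0;
    }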
@@ -3709,6 +3850,7 @@ static unsigned moduleKindForDiagnostic(ModuleKind Kind) {
return 0; // PCH
case MK_ImplicitModule:
case MK_ExplicitModule:
+ case MK_PrebuiltModule:
return 1; // module
case MK_MainFile:
case MK_Preamble:
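For reference, the full mapping this hunk extends, reconstructed from the selection logic in the getInputFile hunk earlier (the trailing return 2 for main-file and preamble kinds is inferred from the err_fe_ast_file_modified fallback; kindForDiagnostic is an illustrative copy, not the clang symbol):

    enum ModuleKind { MK_PCH, MK_ImplicitModule, MK_ExplicitModule,
                      MK_PrebuiltModule, MK_MainFile, MK_Preamble };

    static unsigned kindForDiagnostic(ModuleKind Kind) {
      switch (Kind) {
      case MK_PCH:
        return 0; // selects err_fe_pch_file_modified
      case MK_ImplicitModule:
      case MK_ExplicitModule:
      case MK_PrebuiltModule:  // new: prebuilt modules diagnose as modules
        return 1; // selects err_fe_module_file_modified
      case MK_MainFile:
      case MK_Preamble:
        return 2; // selects err_fe_ast_file_modified (inferred)
      }
      return 2;
    }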
@@ -3750,7 +3892,7 @@ ASTReader::ReadASTCore(StringRef FileName,
// Otherwise, return an error.
Diag(diag::err_module_file_not_found) << moduleKindForDiagnostic(Type)
- << FileName << ErrorStr.empty()
+ << FileName << !ErrorStr.empty()
<< ErrorStr;
return Failure;
@@ -3762,7 +3904,7 @@ ASTReader::ReadASTCore(StringRef FileName,
// Otherwise, return an error.
Diag(diag::err_module_file_out_of_date) << moduleKindForDiagnostic(Type)
- << FileName << ErrorStr.empty()
+ << FileName << !ErrorStr.empty()
<< ErrorStr;
return Failure;
}
@@ -3778,10 +3920,9 @@ ASTReader::ReadASTCore(StringRef FileName,
ModuleFile &F = *M;
BitstreamCursor &Stream = F.Stream;
- PCHContainerRdr.ExtractPCH(F.Buffer->getMemBufferRef(), F.StreamFile);
- Stream.init(&F.StreamFile);
+ Stream = BitstreamCursor(PCHContainerRdr.ExtractPCH(*F.Buffer));
F.SizeInBits = F.Buffer->getBufferSize() * 8;
-
+
// Sniff for the signature.
if (!startsWithASTFileMagic(Stream)) {
Diag(diag::err_module_file_invalid) << moduleKindForDiagnostic(Type)
@@ -3791,16 +3932,16 @@ ASTReader::ReadASTCore(StringRef FileName,
// This is used for compatibility with older PCH formats.
bool HaveReadControlBlock = false;
- while (1) {
+ while (true) {
llvm::BitstreamEntry Entry = Stream.advance();
-
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::Error:
case llvm::BitstreamEntry::Record:
case llvm::BitstreamEntry::EndBlock:
Error("invalid record at top-level of AST file");
return Failure;
-
+
case llvm::BitstreamEntry::SubBlock:
break;
}
@@ -3814,7 +3955,8 @@ ASTReader::ReadASTCore(StringRef FileName,
//
// FIXME: Should we also perform the converse check? Loading a module as
// a PCH file sort of works, but it's a bit wonky.
- if ((Type == MK_ImplicitModule || Type == MK_ExplicitModule) &&
+ if ((Type == MK_ImplicitModule || Type == MK_ExplicitModule ||
+ Type == MK_PrebuiltModule) &&
F.ModuleName.empty()) {
auto Result = (Type == MK_ImplicitModule) ? OutOfDate : Failure;
if (Result != OutOfDate ||
@@ -3930,26 +4072,26 @@ ASTReader::ASTReadResult ASTReader::ReadExtensionBlock(ModuleFile &F) {
void ASTReader::InitializeContext() {
// If there's a listener, notify them that we "read" the translation unit.
if (DeserializationListener)
- DeserializationListener->DeclRead(PREDEF_DECL_TRANSLATION_UNIT_ID,
+ DeserializationListener->DeclRead(PREDEF_DECL_TRANSLATION_UNIT_ID,
Context.getTranslationUnitDecl());
// FIXME: Find a better way to deal with collisions between these
// built-in types. Right now, we just ignore the problem.
-
+
// Load the special types.
if (SpecialTypes.size() >= NumSpecialTypeIDs) {
if (unsigned String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
if (!Context.CFConstantStringTypeDecl)
Context.setCFConstantStringType(GetType(String));
}
-
+
if (unsigned File = SpecialTypes[SPECIAL_TYPE_FILE]) {
QualType FileType = GetType(File);
if (FileType.isNull()) {
Error("FILE type is NULL");
return;
}
-
+
if (!Context.FILEDecl) {
if (const TypedefType *Typedef = FileType->getAs<TypedefType>())
Context.setFILEDecl(Typedef->getDecl());
@@ -3963,14 +4105,14 @@ void ASTReader::InitializeContext() {
}
}
}
-
+
if (unsigned Jmp_buf = SpecialTypes[SPECIAL_TYPE_JMP_BUF]) {
QualType Jmp_bufType = GetType(Jmp_buf);
if (Jmp_bufType.isNull()) {
Error("jmp_buf type is NULL");
return;
}
-
+
if (!Context.jmp_bufDecl) {
if (const TypedefType *Typedef = Jmp_bufType->getAs<TypedefType>())
Context.setjmp_bufDecl(Typedef->getDecl());
@@ -3984,14 +4126,14 @@ void ASTReader::InitializeContext() {
}
}
}
-
+
if (unsigned Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_SIGJMP_BUF]) {
QualType Sigjmp_bufType = GetType(Sigjmp_buf);
if (Sigjmp_bufType.isNull()) {
Error("sigjmp_buf type is NULL");
return;
}
-
+
if (!Context.sigjmp_bufDecl) {
if (const TypedefType *Typedef = Sigjmp_bufType->getAs<TypedefType>())
Context.setsigjmp_bufDecl(Typedef->getDecl());
@@ -4039,7 +4181,7 @@ void ASTReader::InitializeContext() {
}
}
}
-
+
ReadPragmaDiagnosticMappings(Context.getDiagnostics());
// If there were any CUDA special declarations, deserialize them.
@@ -4067,10 +4209,10 @@ void ASTReader::finalizeForWriting() {
// Nothing to do for now.
}
-/// \brief Reads and return the signature record from \p StreamFile's control
-/// block, or else returns 0.
-static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile){
- BitstreamCursor Stream(StreamFile);
+/// \brief Reads and returns the signature record from \p PCH's control block,
+/// or else returns 0.
+static ASTFileSignature readASTFileSignature(StringRef PCH) {
+ BitstreamCursor Stream(PCH);
if (!startsWithASTFileMagic(Stream))
return 0;
@@ -4080,10 +4222,9 @@ static ASTFileSignature readASTFileSignature(llvm::BitstreamReader &StreamFile){
// Scan for SIGNATURE inside the control block.
ASTReader::RecordData Record;
- while (1) {
+ while (true) {
llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
- if (Entry.Kind == llvm::BitstreamEntry::EndBlock ||
- Entry.Kind != llvm::BitstreamEntry::Record)
+ if (Entry.Kind != llvm::BitstreamEntry::Record)
return 0;
Record.clear();
@@ -4108,16 +4249,14 @@ std::string ASTReader::getOriginalSourceFile(
}
// Initialize the stream
- llvm::BitstreamReader StreamFile;
- PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
- BitstreamCursor Stream(StreamFile);
+ BitstreamCursor Stream(PCHContainerRdr.ExtractPCH(**Buffer));
// Sniff for the signature.
if (!startsWithASTFileMagic(Stream)) {
Diags.Report(diag::err_fe_not_a_pch_file) << ASTFileName;
return std::string();
}
-
+
// Scan for the CONTROL_BLOCK_ID block.
if (SkipCursorToBlock(Stream, CONTROL_BLOCK_ID)) {
Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
@@ -4126,16 +4265,16 @@ std::string ASTReader::getOriginalSourceFile(
// Scan for ORIGINAL_FILE inside the control block.
RecordData Record;
- while (1) {
+ while (true) {
llvm::BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
if (Entry.Kind == llvm::BitstreamEntry::EndBlock)
return std::string();
-
+
if (Entry.Kind != llvm::BitstreamEntry::Record) {
Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
return std::string();
}
-
+
Record.clear();
StringRef Blob;
if (Stream.readRecord(Entry.ID, Record, &Blob) == ORIGINAL_FILE)
@@ -4144,6 +4283,7 @@ std::string ASTReader::getOriginalSourceFile(
}
namespace {
+
class SimplePCHValidator : public ASTReaderListener {
const LangOptions &ExistingLangOpts;
const TargetOptions &ExistingTargetOpts;
@@ -4170,11 +4310,13 @@ namespace {
return checkLanguageOptions(ExistingLangOpts, LangOpts, nullptr,
AllowCompatibleDifferences);
}
+
bool ReadTargetOptions(const TargetOptions &TargetOpts, bool Complain,
bool AllowCompatibleDifferences) override {
return checkTargetOptions(ExistingTargetOpts, TargetOpts, nullptr,
AllowCompatibleDifferences);
}
+
bool ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
StringRef SpecificModuleCachePath,
bool Complain) override {
@@ -4182,6 +4324,7 @@ namespace {
ExistingModuleCachePath,
nullptr, ExistingLangOpts);
}
+
bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
bool Complain,
std::string &SuggestedPredefines) override {
@@ -4189,13 +4332,14 @@ namespace {
SuggestedPredefines, ExistingLangOpts);
}
};
-}
+
+} // end anonymous namespace
bool ASTReader::readASTFileControlBlock(
StringRef Filename, FileManager &FileMgr,
const PCHContainerReader &PCHContainerRdr,
bool FindModuleFileExtensions,
- ASTReaderListener &Listener) {
+ ASTReaderListener &Listener, bool ValidateDiagnosticOptions) {
// Open the AST file.
// FIXME: This allows use of the VFS; we do not allow use of the
// VFS when actually loading a module.
@@ -4205,9 +4349,7 @@ bool ASTReader::readASTFileControlBlock(
}
// Initialize the stream
- llvm::BitstreamReader StreamFile;
- PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), StreamFile);
- BitstreamCursor Stream(StreamFile);
+ BitstreamCursor Stream(PCHContainerRdr.ExtractPCH(**Buffer));
// Sniff for the signature.
if (!startsWithASTFileMagic(Stream))
@@ -4235,7 +4377,8 @@ bool ASTReader::readASTFileControlBlock(
std::string IgnoredSuggestedPredefines;
if (ReadOptionsBlock(Stream, ARR_ConfigurationMismatch | ARR_OutOfDate,
/*AllowCompatibleConfigurationMismatch*/ false,
- Listener, IgnoredSuggestedPredefines) != Success)
+ Listener, IgnoredSuggestedPredefines,
+ ValidateDiagnosticOptions) != Success)
return true;
break;
}
@@ -4280,7 +4423,7 @@ bool ASTReader::readASTFileControlBlock(
if (Listener.ReadFullVersionInformation(Blob))
return true;
-
+
break;
}
case MODULE_NAME:
@@ -4408,7 +4551,8 @@ bool ASTReader::isAcceptableASTFile(
ExistingModuleCachePath, FileMgr);
return !readASTFileControlBlock(Filename, FileMgr, PCHContainerRdr,
/*FindModuleFileExtensions=*/false,
- validator);
+ validator,
+ /*ValidateDiagnosticOptions=*/true);
}
ASTReader::ASTReadResult
@@ -4425,7 +4569,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
RecordData Record;
while (true) {
llvm::BitstreamEntry Entry = F.Stream.advanceSkippingSubblocks();
-
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
case llvm::BitstreamEntry::Error:
@@ -4523,7 +4667,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
CurrentModule->ConfigMacrosExhaustive = ConfigMacrosExhaustive;
if (DeserializationListener)
DeserializationListener->ModuleRead(GlobalID, CurrentModule);
-
+
SubmodulesLoaded[GlobalIndex] = CurrentModule;
// Clear out data that will be replaced by what is in the module file.
@@ -4563,7 +4707,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
}
-
+
case SUBMODULE_HEADER:
case SUBMODULE_EXCLUDED_HEADER:
case SUBMODULE_PRIVATE_HEADER:
@@ -4597,17 +4741,17 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
}
-
+
case SUBMODULE_METADATA: {
F.BaseSubmoduleID = getTotalNumSubmodules();
F.LocalNumSubmodules = Record[0];
unsigned LocalBaseSubmoduleID = Record[1];
if (F.LocalNumSubmodules > 0) {
- // Introduce the global -> local mapping for submodules within this
+ // Introduce the global -> local mapping for submodules within this
// module.
GlobalSubmoduleMap.insert(std::make_pair(getTotalNumSubmodules()+1,&F));
-
- // Introduce the local -> global mapping for submodules within this
+
+ // Introduce the local -> global mapping for submodules within this
// module.
F.SubmoduleRemap.insertOrReplace(
std::make_pair(LocalBaseSubmoduleID,
@@ -4617,7 +4761,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
break;
}
-
+
case SUBMODULE_IMPORTS: {
for (unsigned Idx = 0; Idx != Record.size(); ++Idx) {
UnresolvedModuleRef Unresolved;
@@ -4641,8 +4785,8 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
Unresolved.IsWildcard = Record[Idx + 1];
UnresolvedModuleRefs.push_back(Unresolved);
}
-
- // Once we've loaded the set of exports, there's no reason to keep
+
+ // Once we've loaded the set of exports, there's no reason to keep
// the parsed, unresolved exports around.
CurrentModule->UnresolvedExports.clear();
break;
@@ -4673,6 +4817,13 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
UnresolvedModuleRefs.push_back(Unresolved);
break;
}
+
+ case SUBMODULE_INITIALIZERS:
+ SmallVector<uint32_t, 16> Inits;
+ for (auto &ID : Record)
+ Inits.push_back(getGlobalDeclID(F, ID));
+ Context.addLazyModuleInitializers(CurrentModule, Inits);
+ break;
}
}
}
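getGlobalDeclID above applies the reader's standard ID translation: each module file owns a contiguous block of the global ID space, so a local ID becomes global by adding a per-module base, while a small predefined range is shared and never remapped. A minimal model of the common single-module case (the names and the predefined count are illustrative):

    #include <cstdint>

    constexpr uint32_t NumPredefIDs = 8; // shared range, never remapped

    struct ModuleIDBlock {
      uint32_t BaseID; // first global ID owned by this module file
    };

    static uint32_t toGlobalID(const ModuleIDBlock &M, uint32_t LocalID) {
      if (LocalID < NumPredefIDs)
        return LocalID;          // predefined IDs are already global
      return LocalID + M.BaseID; // shift into the module's global block
    }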
@@ -4848,7 +4999,7 @@ std::pair<ModuleFile *, unsigned>
ASTReader::getModulePreprocessedEntity(unsigned GlobalIndex) {
GlobalPreprocessedEntityMapType::iterator
I = GlobalPreprocessedEntityMap.find(GlobalIndex);
- assert(I != GlobalPreprocessedEntityMap.end() &&
+ assert(I != GlobalPreprocessedEntityMap.end() &&
"Corrupted global preprocessed entity map");
ModuleFile *M = I->second;
unsigned LocalIndex = GlobalIndex - M->BasePreprocessedEntityID;
@@ -4884,8 +5035,8 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
Error("no preprocessing record");
return nullptr;
}
-
- SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
+
+ SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
M.PreprocessorDetailCursor.JumpToBit(PPOffs.BitOffset);
llvm::BitstreamEntry Entry =
@@ -4924,7 +5075,7 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
return ME;
}
-
+
case PPD_MACRO_DEFINITION: {
// Decode the identifier info and then check again; if the macro is
// still defined and associated with the identifier,
@@ -4936,14 +5087,14 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
return MD;
}
-
+
case PPD_INCLUSION_DIRECTIVE: {
const char *FullFileNameStart = Blob.data() + Record[0];
StringRef FullFileName(FullFileNameStart, Blob.size() - Record[0]);
const FileEntry *File = nullptr;
if (!FullFileName.empty())
File = PP.getFileManager().getFile(FullFileName);
-
+
// FIXME: Stable encoding
InclusionDirective::InclusionKind Kind
= static_cast<InclusionDirective::InclusionKind>(Record[2]);
@@ -5006,7 +5157,7 @@ struct PPEntityComp {
}
};
-}
+} // end anonymous namespace
PreprocessedEntityID ASTReader::findPreprocessedEntity(SourceLocation Loc,
bool EndsAfter) const {
@@ -5084,11 +5235,11 @@ Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
ModuleFile &M = *PPInfo.first;
unsigned LocalIndex = PPInfo.second;
const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
-
+
SourceLocation Loc = TranslateSourceLocation(M, PPOffs.getBegin());
if (Loc.isInvalid())
return false;
-
+
if (SourceMgr.isInFileID(SourceMgr.getFileLoc(Loc), FID))
return true;
else
@@ -5096,12 +5247,13 @@ Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
}
namespace {
+
/// \brief Visitor used to search for information about a header file.
class HeaderFileInfoVisitor {
const FileEntry *FE;
-
+
Optional<HeaderFileInfo> HFI;
-
+
public:
explicit HeaderFileInfoVisitor(const FileEntry *FE)
: FE(FE) { }
@@ -5120,17 +5272,18 @@ namespace {
HFI = *Pos;
return true;
}
-
+
Optional<HeaderFileInfo> getHeaderFileInfo() const { return HFI; }
};
-}
+
+} // end anonymous namespace
HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
HeaderFileInfoVisitor Visitor(FE);
ModuleMgr.visit(Visitor);
if (Optional<HeaderFileInfo> HFI = Visitor.getHeaderFileInfo())
return *HFI;
-
+
return HeaderFileInfo();
}
@@ -5152,7 +5305,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
FullSourceLoc(Loc, SourceMgr)));
continue;
}
-
+
assert(DiagStateID == 0);
// A new DiagState was created here.
Diag.DiagStates.push_back(*Diag.GetCurDiagState());
@@ -5161,7 +5314,7 @@ void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
Diag.DiagStatePoints.push_back(
DiagnosticsEngine::DiagStatePoint(NewState,
FullSourceLoc(Loc, SourceMgr)));
- while (1) {
+ while (true) {
assert(Idx < F.PragmaDiagMappings.size() &&
"Invalid data, didn't find '-1' marking end of diag/map pairs");
if (Idx >= F.PragmaDiagMappings.size()) {
@@ -5298,7 +5451,7 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
QualType ClassType = readType(*Loc.F, Record, Idx);
if (PointeeType.isNull() || ClassType.isNull())
return QualType();
-
+
return Context.getMemberPointerType(PointeeType, ClassType.getTypePtr());
}
@@ -5408,7 +5561,7 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
return Context.getTypeDeclType(
ReadDeclAs<UnresolvedUsingTypenameDecl>(*Loc.F, Record, Idx));
}
-
+
case TYPE_TYPEDEF: {
if (Record.size() != 2) {
Error("incorrect encoding of typedef type");
@@ -5529,6 +5682,16 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
return Context.getObjCInterfaceType(ItfD->getCanonicalDecl());
}
+ case TYPE_OBJC_TYPE_PARAM: {
+ unsigned Idx = 0;
+ ObjCTypeParamDecl *Decl
+ = ReadDeclAs<ObjCTypeParamDecl>(*Loc.F, Record, Idx);
+ unsigned NumProtos = Record[Idx++];
+ SmallVector<ObjCProtocolDecl*, 4> Protos;
+ for (unsigned I = 0; I != NumProtos; ++I)
+ Protos.push_back(ReadDeclAs<ObjCProtocolDecl>(*Loc.F, Record, Idx));
+ return Context.getObjCTypeParamType(Decl, Protos);
+ }
case TYPE_OBJC_OBJECT: {
unsigned Idx = 0;
QualType Base = readType(*Loc.F, Record, Idx);
@@ -5666,15 +5829,17 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
}
case TYPE_PIPE: {
- if (Record.size() != 1) {
+ if (Record.size() != 2) {
Error("Incorrect encoding of pipe type");
return QualType();
}
// Reading the pipe element type.
QualType ElementType = readType(*Loc.F, Record, Idx);
- return Context.getPipeType(ElementType);
+ unsigned ReadOnly = Record[1];
+ return Context.getPipeType(ElementType, ReadOnly);
}
+
}
llvm_unreachable("Invalid TypeCode!");
}
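The TYPE_PIPE record above grows from one entry to two: the element type reference plus a read-only flag distinguishing OpenCL read_only from write_only pipes. A sketch of the widened layout (PipeTypeRecord and decodePipe are illustrative names):

    #include <cstdint>
    #include <vector>

    struct PipeTypeRecord {
      uint64_t ElementTypeRef; // consumed by readType in the real reader
      bool IsReadOnly;
    };

    // Mirrors the 'Record.size() != 2' validation in the hunk above.
    static bool decodePipe(const std::vector<uint64_t> &Record,
                           PipeTypeRecord &Out) {
      if (Record.size() != 2)
        return false; // malformed encoding
      Out = {Record[0], Record[1] != 0};
      return true;
    }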
@@ -5701,26 +5866,27 @@ void ASTReader::readExceptionSpec(ModuleFile &ModuleFile,
}
class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
- ASTReader &Reader;
- ModuleFile &F;
+ ModuleFile *F;
+ ASTReader *Reader;
const ASTReader::RecordData &Record;
unsigned &Idx;
- SourceLocation ReadSourceLocation(const ASTReader::RecordData &R,
- unsigned &I) {
- return Reader.ReadSourceLocation(F, R, I);
+ SourceLocation ReadSourceLocation() {
+ return Reader->ReadSourceLocation(*F, Record, Idx);
}
- template<typename T>
- T *ReadDeclAs(const ASTReader::RecordData &Record, unsigned &Idx) {
- return Reader.ReadDeclAs<T>(F, Record, Idx);
+ TypeSourceInfo *GetTypeSourceInfo() {
+ return Reader->GetTypeSourceInfo(*F, Record, Idx);
}
-
+
+ NestedNameSpecifierLoc ReadNestedNameSpecifierLoc() {
+ return Reader->ReadNestedNameSpecifierLoc(*F, Record, Idx);
+ }
+
public:
- TypeLocReader(ASTReader &Reader, ModuleFile &F,
+ TypeLocReader(ModuleFile &F, ASTReader &Reader,
const ASTReader::RecordData &Record, unsigned &Idx)
- : Reader(Reader), F(F), Record(Record), Idx(Idx)
- { }
+ : F(&F), Reader(&Reader), Record(Record), Idx(Idx) {}
// We want compile-time assurance that we've enumerated all of
// these, so unfortunately we have to declare them first, then
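The refactor above flips TypeLocReader to hold its cursor state (module file, record, index) and route reads through zero-argument private helpers, so every Visit* override below collapses to bare helper calls. The shape of the pattern in miniature (all names illustrative):

    #include <cstdint>
    #include <vector>

    class LocReader {
      const std::vector<uint64_t> &Record;
      unsigned &Idx;
      uint64_t StarLoc = 0;

      // Zero-argument helper: captured state replaces the Record/Idx
      // parameters that were previously threaded through each call.
      uint64_t readSourceLocation() { return Record[Idx++]; }

    public:
      LocReader(const std::vector<uint64_t> &Record, unsigned &Idx)
          : Record(Record), Idx(Idx) {}

      void visitPointerLoc() { StarLoc = readSourceLocation(); }
    };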
@@ -5737,8 +5903,9 @@ public:
void TypeLocReader::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
// nothing to do
}
+
void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
- TL.setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ TL.setBuiltinLoc(ReadSourceLocation());
if (TL.needsExtraLocalData()) {
TL.setWrittenTypeSpec(static_cast<DeclSpec::TST>(Record[Idx++]));
TL.setWrittenSignSpec(static_cast<DeclSpec::TSS>(Record[Idx++]));
@@ -5746,219 +5913,264 @@ void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
TL.setModeAttr(Record[Idx++]);
}
}
+
void TypeLocReader::VisitComplexTypeLoc(ComplexTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitPointerTypeLoc(PointerTypeLoc TL) {
- TL.setStarLoc(ReadSourceLocation(Record, Idx));
+ TL.setStarLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
// nothing to do
}
+
void TypeLocReader::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing to do
}
+
void TypeLocReader::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
- TL.setCaretLoc(ReadSourceLocation(Record, Idx));
+ TL.setCaretLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
- TL.setAmpLoc(ReadSourceLocation(Record, Idx));
+ TL.setAmpLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
- TL.setAmpAmpLoc(ReadSourceLocation(Record, Idx));
+ TL.setAmpAmpLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
- TL.setStarLoc(ReadSourceLocation(Record, Idx));
- TL.setClassTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+ TL.setStarLoc(ReadSourceLocation());
+ TL.setClassTInfo(GetTypeSourceInfo());
}
+
void TypeLocReader::VisitArrayTypeLoc(ArrayTypeLoc TL) {
- TL.setLBracketLoc(ReadSourceLocation(Record, Idx));
- TL.setRBracketLoc(ReadSourceLocation(Record, Idx));
+ TL.setLBracketLoc(ReadSourceLocation());
+ TL.setRBracketLoc(ReadSourceLocation());
if (Record[Idx++])
- TL.setSizeExpr(Reader.ReadExpr(F));
+ TL.setSizeExpr(Reader->ReadExpr(*F));
else
TL.setSizeExpr(nullptr);
}
+
void TypeLocReader::VisitConstantArrayTypeLoc(ConstantArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocReader::VisitIncompleteArrayTypeLoc(IncompleteArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocReader::VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocReader::VisitDependentSizedArrayTypeLoc(
DependentSizedArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocReader::VisitDependentSizedExtVectorTypeLoc(
DependentSizedExtVectorTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitVectorTypeLoc(VectorTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
- TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx));
- TL.setLParenLoc(ReadSourceLocation(Record, Idx));
- TL.setRParenLoc(ReadSourceLocation(Record, Idx));
- TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx));
+ TL.setLocalRangeBegin(ReadSourceLocation());
+ TL.setLParenLoc(ReadSourceLocation());
+ TL.setRParenLoc(ReadSourceLocation());
+ TL.setLocalRangeEnd(ReadSourceLocation());
for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i) {
- TL.setParam(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
+ TL.setParam(i, Reader->ReadDeclAs<ParmVarDecl>(*F, Record, Idx));
}
}
+
void TypeLocReader::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
VisitFunctionTypeLoc(TL);
}
+
void TypeLocReader::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
VisitFunctionTypeLoc(TL);
}
void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
void TypeLocReader::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
- TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
- TL.setLParenLoc(ReadSourceLocation(Record, Idx));
- TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setTypeofLoc(ReadSourceLocation());
+ TL.setLParenLoc(ReadSourceLocation());
+ TL.setRParenLoc(ReadSourceLocation());
}
void TypeLocReader::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
- TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
- TL.setLParenLoc(ReadSourceLocation(Record, Idx));
- TL.setRParenLoc(ReadSourceLocation(Record, Idx));
- TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+ TL.setTypeofLoc(ReadSourceLocation());
+ TL.setLParenLoc(ReadSourceLocation());
+ TL.setRParenLoc(ReadSourceLocation());
+ TL.setUnderlyingTInfo(GetTypeSourceInfo());
}
void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
- TL.setKWLoc(ReadSourceLocation(Record, Idx));
- TL.setLParenLoc(ReadSourceLocation(Record, Idx));
- TL.setRParenLoc(ReadSourceLocation(Record, Idx));
- TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+ TL.setKWLoc(ReadSourceLocation());
+ TL.setLParenLoc(ReadSourceLocation());
+ TL.setRParenLoc(ReadSourceLocation());
+ TL.setUnderlyingTInfo(GetTypeSourceInfo());
}
+
void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
- TL.setAttrNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setAttrNameLoc(ReadSourceLocation());
if (TL.hasAttrOperand()) {
SourceRange range;
- range.setBegin(ReadSourceLocation(Record, Idx));
- range.setEnd(ReadSourceLocation(Record, Idx));
+ range.setBegin(ReadSourceLocation());
+ range.setEnd(ReadSourceLocation());
TL.setAttrOperandParensRange(range);
}
if (TL.hasAttrExprOperand()) {
if (Record[Idx++])
- TL.setAttrExprOperand(Reader.ReadExpr(F));
+ TL.setAttrExprOperand(Reader->ReadExpr(*F));
else
TL.setAttrExprOperand(nullptr);
} else if (TL.hasAttrEnumOperand())
- TL.setAttrEnumOperandLoc(ReadSourceLocation(Record, Idx));
+ TL.setAttrEnumOperandLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitSubstTemplateTypeParmTypeLoc(
SubstTemplateTypeParmTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
SubstTemplateTypeParmPackTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
void TypeLocReader::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
- TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
- TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
- TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
- TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setTemplateKeywordLoc(ReadSourceLocation());
+ TL.setTemplateNameLoc(ReadSourceLocation());
+ TL.setLAngleLoc(ReadSourceLocation());
+ TL.setRAngleLoc(ReadSourceLocation());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
- TL.setArgLocInfo(i,
- Reader.GetTemplateArgumentLocInfo(F,
- TL.getTypePtr()->getArg(i).getKind(),
- Record, Idx));
+ TL.setArgLocInfo(
+ i,
+ Reader->GetTemplateArgumentLocInfo(
+ *F, TL.getTypePtr()->getArg(i).getKind(), Record, Idx));
}
void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
- TL.setLParenLoc(ReadSourceLocation(Record, Idx));
- TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation());
+ TL.setRParenLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
- TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+ TL.setElaboratedKeywordLoc(ReadSourceLocation());
+ TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
}
+
void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
- TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
- TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setElaboratedKeywordLoc(ReadSourceLocation());
+ TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
+ TL.setNameLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
- TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
- TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
- TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
- TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
- TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setElaboratedKeywordLoc(ReadSourceLocation());
+ TL.setQualifierLoc(ReadNestedNameSpecifierLoc());
+ TL.setTemplateKeywordLoc(ReadSourceLocation());
+ TL.setTemplateNameLoc(ReadSourceLocation());
+ TL.setLAngleLoc(ReadSourceLocation());
+ TL.setRAngleLoc(ReadSourceLocation());
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
- TL.setArgLocInfo(I,
- Reader.GetTemplateArgumentLocInfo(F,
- TL.getTypePtr()->getArg(I).getKind(),
- Record, Idx));
+ TL.setArgLocInfo(
+ I,
+ Reader->GetTemplateArgumentLocInfo(
+ *F, TL.getTypePtr()->getArg(I).getKind(), Record, Idx));
}
+
void TypeLocReader::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
- TL.setEllipsisLoc(ReadSourceLocation(Record, Idx));
+ TL.setEllipsisLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
- TL.setNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setNameLoc(ReadSourceLocation());
}
+
+void TypeLocReader::VisitObjCTypeParamTypeLoc(ObjCTypeParamTypeLoc TL) {
+ if (TL.getNumProtocols()) {
+ TL.setProtocolLAngleLoc(ReadSourceLocation());
+ TL.setProtocolRAngleLoc(ReadSourceLocation());
+ }
+ for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
+ TL.setProtocolLoc(i, ReadSourceLocation());
+}
+
void TypeLocReader::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
TL.setHasBaseTypeAsWritten(Record[Idx++]);
- TL.setTypeArgsLAngleLoc(ReadSourceLocation(Record, Idx));
- TL.setTypeArgsRAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setTypeArgsLAngleLoc(ReadSourceLocation());
+ TL.setTypeArgsRAngleLoc(ReadSourceLocation());
for (unsigned i = 0, e = TL.getNumTypeArgs(); i != e; ++i)
- TL.setTypeArgTInfo(i, Reader.GetTypeSourceInfo(F, Record, Idx));
- TL.setProtocolLAngleLoc(ReadSourceLocation(Record, Idx));
- TL.setProtocolRAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setTypeArgTInfo(i, GetTypeSourceInfo());
+ TL.setProtocolLAngleLoc(ReadSourceLocation());
+ TL.setProtocolRAngleLoc(ReadSourceLocation());
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
- TL.setProtocolLoc(i, ReadSourceLocation(Record, Idx));
+ TL.setProtocolLoc(i, ReadSourceLocation());
}
+
void TypeLocReader::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
- TL.setStarLoc(ReadSourceLocation(Record, Idx));
+ TL.setStarLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
- TL.setKWLoc(ReadSourceLocation(Record, Idx));
- TL.setLParenLoc(ReadSourceLocation(Record, Idx));
- TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setKWLoc(ReadSourceLocation());
+ TL.setLParenLoc(ReadSourceLocation());
+ TL.setRParenLoc(ReadSourceLocation());
}
+
void TypeLocReader::VisitPipeTypeLoc(PipeTypeLoc TL) {
- TL.setKWLoc(ReadSourceLocation(Record, Idx));
+ TL.setKWLoc(ReadSourceLocation());
}
-TypeSourceInfo *ASTReader::GetTypeSourceInfo(ModuleFile &F,
- const RecordData &Record,
- unsigned &Idx) {
+TypeSourceInfo *
+ASTReader::GetTypeSourceInfo(ModuleFile &F, const ASTReader::RecordData &Record,
+ unsigned &Idx) {
QualType InfoTy = readType(F, Record, Idx);
if (InfoTy.isNull())
return nullptr;
TypeSourceInfo *TInfo = getContext().CreateTypeSourceInfo(InfoTy);
- TypeLocReader TLR(*this, F, Record, Idx);
+ TypeLocReader TLR(F, *this, Record, Idx);
for (TypeLoc TL = TInfo->getTypeLoc(); !TL.isNull(); TL = TL.getNextTypeLoc())
TLR.Visit(TL);
return TInfo;
@@ -6141,18 +6353,18 @@ QualType ASTReader::getLocalType(ModuleFile &F, unsigned LocalID) {
return GetType(getGlobalTypeID(F, LocalID));
}
-serialization::TypeID
+serialization::TypeID
ASTReader::getGlobalTypeID(ModuleFile &F, unsigned LocalID) const {
unsigned FastQuals = LocalID & Qualifiers::FastMask;
unsigned LocalIndex = LocalID >> Qualifiers::FastWidth;
-
+
if (LocalIndex < NUM_PREDEF_TYPE_IDS)
return LocalID;
ContinuousRangeMap<uint32_t, int, 2>::iterator I
= F.TypeRemap.find(LocalIndex - NUM_PREDEF_TYPE_IDS);
assert(I != F.TypeRemap.end() && "Invalid index into type index remap");
-
+
unsigned GlobalIndex = LocalIndex + I->second;
return (GlobalIndex << Qualifiers::FastWidth) | FastQuals;
}
@@ -6168,18 +6380,18 @@ ASTReader::GetTemplateArgumentLocInfo(ModuleFile &F,
case TemplateArgument::Type:
return GetTypeSourceInfo(F, Record, Index);
case TemplateArgument::Template: {
- NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
Index);
SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
SourceLocation());
}
case TemplateArgument::TemplateExpansion: {
- NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
Index);
SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Index);
- return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
+ return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
EllipsisLoc);
}
case TemplateArgument::Null:
@@ -6324,7 +6536,7 @@ CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
return Bases;
}
-serialization::DeclID
+serialization::DeclID
ASTReader::getGlobalDeclID(ModuleFile &F, LocalDeclID LocalID) const {
if (LocalID < NUM_PREDEF_DECL_IDS)
return LocalID;
@@ -6332,7 +6544,7 @@ ASTReader::getGlobalDeclID(ModuleFile &F, LocalDeclID LocalID) const {
ContinuousRangeMap<uint32_t, int, 2>::iterator I
= F.DeclRemap.find(LocalID - NUM_PREDEF_DECL_IDS);
assert(I != F.DeclRemap.end() && "Invalid index into decl index remap");
-
+
return LocalID + I->second;
}
@@ -6342,7 +6554,7 @@ bool ASTReader::isDeclIDFromModule(serialization::GlobalDeclID ID,
if (ID < NUM_PREDEF_DECL_IDS)
return false;
- return ID - NUM_PREDEF_DECL_IDS >= M.BaseDeclID &&
+ return ID - NUM_PREDEF_DECL_IDS >= M.BaseDeclID &&
ID - NUM_PREDEF_DECL_IDS < M.BaseDeclID + M.LocalNumDecls;
}
@@ -6474,11 +6686,11 @@ Decl *ASTReader::GetDecl(DeclID ID) {
return DeclsLoaded[Index];
}
-DeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
+DeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
DeclID GlobalID) {
if (GlobalID < NUM_PREDEF_DECL_IDS)
return GlobalID;
-
+
GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(GlobalID);
assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
ModuleFile *Owner = I->second;
@@ -6487,18 +6699,18 @@ DeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
= M.GlobalToLocalDeclIDs.find(Owner);
if (Pos == M.GlobalToLocalDeclIDs.end())
return 0;
-
+
return GlobalID - Owner->BaseDeclID + Pos->second;
}
-serialization::DeclID ASTReader::ReadDeclID(ModuleFile &F,
+serialization::DeclID ASTReader::ReadDeclID(ModuleFile &F,
const RecordData &Record,
unsigned &Idx) {
if (Idx >= Record.size()) {
Error("Corrupted AST file");
return 0;
}
-
+
return getGlobalDeclID(F, Record[Idx++]);
}
@@ -6591,7 +6803,7 @@ public:
}
};
-}
+} // end anonymous namespace
void ASTReader::FindFileRegionDecls(FileID File,
unsigned Offset, unsigned Length,
@@ -6630,7 +6842,7 @@ void ASTReader::FindFileRegionDecls(FileID File,
EndLoc, DIDComp);
if (EndIt != DInfo.Decls.end())
++EndIt;
-
+
for (ArrayRef<serialization::LocalDeclID>::iterator
DIt = BeginIt; DIt != EndIt; ++DIt)
Decls.push_back(GetDecl(getGlobalDeclID(*DInfo.Mod, *DIt)));
@@ -6841,23 +7053,23 @@ void ASTReader::PrintStats() {
std::fprintf(stderr, "\n");
GlobalIndex->printStats();
}
-
+
std::fprintf(stderr, "\n");
dump();
std::fprintf(stderr, "\n");
}
template<typename Key, typename ModuleFile, unsigned InitialCapacity>
-static void
+static void
dumpModuleIDMap(StringRef Name,
- const ContinuousRangeMap<Key, ModuleFile *,
+ const ContinuousRangeMap<Key, ModuleFile *,
InitialCapacity> &Map) {
if (Map.begin() == Map.end())
return;
-
+
typedef ContinuousRangeMap<Key, ModuleFile *, InitialCapacity> MapType;
llvm::errs() << Name << ":\n";
- for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
+ for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
I != IEnd; ++I) {
llvm::errs() << " " << I->first << " -> " << I->second->FileName
<< "\n";
@@ -6874,11 +7086,11 @@ LLVM_DUMP_METHOD void ASTReader::dump() {
dumpModuleIDMap("Global macro map", GlobalMacroMap);
dumpModuleIDMap("Global submodule map", GlobalSubmoduleMap);
dumpModuleIDMap("Global selector map", GlobalSelectorMap);
- dumpModuleIDMap("Global preprocessed entity map",
+ dumpModuleIDMap("Global preprocessed entity map",
GlobalPreprocessedEntityMap);
-
+
llvm::errs() << "\n*** PCH/Modules Loaded:";
- for (ModuleManager::ModuleConstIterator M = ModuleMgr.begin(),
+ for (ModuleManager::ModuleConstIterator M = ModuleMgr.begin(),
MEnd = ModuleMgr.end();
M != MEnd; ++M)
(*M)->dump();
@@ -6921,14 +7133,9 @@ void ASTReader::InitializeSema(Sema &S) {
SemaObj->FPFeatures.fp_contract = FPPragmaOptions[0];
}
- // FIXME: What happens if these are changed by a module import?
- if (!OpenCLExtensions.empty()) {
- unsigned I = 0;
-#define OPENCLEXT(nm) SemaObj->OpenCLFeatures.nm = OpenCLExtensions[I++];
-#include "clang/Basic/OpenCLExtensions.def"
-
- assert(OpenCLExtensions.size() == I && "Wrong number of OPENCL_EXTENSIONS");
- }
+ SemaObj->OpenCLFeatures.copy(OpenCLExtensions);
+ SemaObj->OpenCLTypeExtMap = OpenCLTypeExtMap;
+ SemaObj->OpenCLDeclExtMap = OpenCLDeclExtMap;
UpdateSema();
}
@@ -6939,12 +7146,14 @@ void ASTReader::UpdateSema() {
// Load the offsets of the declarations that Sema references.
// They will be lazily deserialized when needed.
if (!SemaDeclRefs.empty()) {
- assert(SemaDeclRefs.size() % 2 == 0);
- for (unsigned I = 0; I != SemaDeclRefs.size(); I += 2) {
+ assert(SemaDeclRefs.size() % 3 == 0);
+ for (unsigned I = 0; I != SemaDeclRefs.size(); I += 3) {
if (!SemaObj->StdNamespace)
SemaObj->StdNamespace = SemaDeclRefs[I];
if (!SemaObj->StdBadAlloc)
SemaObj->StdBadAlloc = SemaDeclRefs[I+1];
+ if (!SemaObj->StdAlignValT)
+ SemaObj->StdAlignValT = SemaDeclRefs[I+2];
}
SemaDeclRefs.clear();
}
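The hunk above widens each SemaDeclRefs group from two declaration IDs to three, so std::align_val_t travels alongside the std namespace and std::bad_alloc. A standalone sketch of reading such a fixed-stride record, with hypothetical IDs:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      // Hypothetical serialized triples of declaration IDs, in the order
      // (StdNamespace, StdBadAlloc, StdAlignValT) used by UpdateSema().
      std::vector<uint32_t> SemaDeclRefs = {10, 11, 12, 20, 21, 22};
      assert(SemaDeclRefs.size() % 3 == 0);

      uint32_t StdNamespace = 0, StdBadAlloc = 0, StdAlignValT = 0;
      for (size_t I = 0; I != SemaDeclRefs.size(); I += 3) {
        // Only the first non-null reference in each slot wins, as above.
        if (!StdNamespace) StdNamespace = SemaDeclRefs[I];
        if (!StdBadAlloc)  StdBadAlloc  = SemaDeclRefs[I + 1];
        if (!StdAlignValT) StdAlignValT = SemaDeclRefs[I + 2];
      }

      assert(StdNamespace == 10 && StdBadAlloc == 11 && StdAlignValT == 12);
      return 0;
    }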
@@ -6961,6 +7170,7 @@ void ASTReader::UpdateSema() {
PragmaMSPointersToMembersState,
PointersToMembersPragmaLocation);
}
+ SemaObj->ForceCUDAHostDeviceDepth = ForceCUDAHostDeviceDepth;
}
IdentifierInfo *ASTReader::get(StringRef Name) {
@@ -6999,6 +7209,7 @@ IdentifierInfo *ASTReader::get(StringRef Name) {
}
namespace clang {
+
/// \brief An identifier-lookup iterator that enumerates all of the
/// identifiers stored within a set of AST files.
class ASTIdentifierIterator : public IdentifierIterator {
@@ -7026,7 +7237,8 @@ namespace clang {
StringRef Next() override;
};
-}
+
+} // end namespace clang
ASTIdentifierIterator::ASTIdentifierIterator(const ASTReader &Reader,
bool SkipModules)
@@ -7058,6 +7270,7 @@ StringRef ASTIdentifierIterator::Next() {
}
namespace {
+
/// A utility for appending two IdentifierIterators.
class ChainedIdentifierIterator : public IdentifierIterator {
std::unique_ptr<IdentifierIterator> Current;
@@ -7082,6 +7295,7 @@ public:
return Next();
}
};
+
} // end anonymous namespace.
IdentifierIterator *ASTReader::getIdentifiers() {
@@ -7097,7 +7311,9 @@ IdentifierIterator *ASTReader::getIdentifiers() {
return new ASTIdentifierIterator(*this);
}
-namespace clang { namespace serialization {
+namespace clang {
+namespace serialization {
+
class ReadMethodPoolVisitor {
ASTReader &Reader;
Selector Sel;
@@ -7119,7 +7335,7 @@ namespace clang { namespace serialization {
bool operator()(ModuleFile &M) {
if (!M.SelectorLookupTable)
return false;
-
+
// If we've already searched this module file, skip it now.
if (M.Generation <= PriorGeneration)
return true;
@@ -7149,14 +7365,14 @@ namespace clang { namespace serialization {
FactoryHasMoreThanOneDecl = Data.FactoryHasMoreThanOneDecl;
return true;
}
-
+
/// \brief Retrieve the instance methods found by this visitor.
- ArrayRef<ObjCMethodDecl *> getInstanceMethods() const {
- return InstanceMethods;
+ ArrayRef<ObjCMethodDecl *> getInstanceMethods() const {
+ return InstanceMethods;
}
/// \brief Retrieve the instance methods found by this visitor.
- ArrayRef<ObjCMethodDecl *> getFactoryMethods() const {
+ ArrayRef<ObjCMethodDecl *> getFactoryMethods() const {
return FactoryMethods;
}
@@ -7167,7 +7383,9 @@ namespace clang { namespace serialization {
}
bool factoryHasMoreThanOneDecl() const { return FactoryHasMoreThanOneDecl; }
};
-} } // end namespace clang::serialization
+
+} // end namespace serialization
+} // end namespace clang
/// \brief Add the given set of methods to the method list.
static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
@@ -7176,14 +7394,14 @@ static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
S.addMethodToGlobalList(&List, Methods[I]);
}
}
-
+
void ASTReader::ReadMethodPool(Selector Sel) {
// Get the selector generation and update it to the current generation.
unsigned &Generation = SelectorGeneration[Sel];
unsigned PriorGeneration = Generation;
Generation = getGeneration();
SelectorOutOfDate[Sel] = false;
-
+
// Search for methods defined with this selector.
++NumMethodPoolLookups;
ReadMethodPoolVisitor Visitor(*this, Sel, PriorGeneration);
@@ -7197,7 +7415,7 @@ void ASTReader::ReadMethodPool(Selector Sel) {
if (!getSema())
return;
-
+
Sema &S = *getSema();
Sema::GlobalMethodPool::iterator Pos
= S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethods())).first;
@@ -7222,9 +7440,9 @@ void ASTReader::updateOutOfDateSelector(Selector Sel) {
void ASTReader::ReadKnownNamespaces(
SmallVectorImpl<NamespaceDecl *> &Namespaces) {
Namespaces.clear();
-
+
for (unsigned I = 0, N = KnownNamespaces.size(); I != N; ++I) {
- if (NamespaceDecl *Namespace
+ if (NamespaceDecl *Namespace
= dyn_cast_or_null<NamespaceDecl>(GetDecl(KnownNamespaces[I])))
Namespaces.push_back(Namespace);
}
@@ -7313,7 +7531,7 @@ void ASTReader::ReadReferencedSelectors(
SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) {
if (ReferencedSelectorsData.empty())
return;
-
+
  // If there are @selector references, add them to the pool. This is for
  // the implementation of -Wselector.
unsigned int DataSize = ReferencedSelectorsData.size()-1;
@@ -7333,9 +7551,9 @@ void ASTReader::ReadWeakUndeclaredIdentifiers(
return;
for (unsigned I = 0, N = WeakUndeclaredIdentifiers.size(); I < N; /*none*/) {
- IdentifierInfo *WeakId
+ IdentifierInfo *WeakId
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
- IdentifierInfo *AliasId
+ IdentifierInfo *AliasId
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
SourceLocation Loc
= SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
@@ -7355,7 +7573,7 @@ void ASTReader::ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) {
VT.DefinitionRequired = VTableUses[Idx++];
VTables.push_back(VT);
}
-
+
VTableUses.clear();
}
@@ -7367,17 +7585,18 @@ void ASTReader::ReadPendingInstantiations(
= SourceLocation::getFromRawEncoding(PendingInstantiations[Idx++]);
Pending.push_back(std::make_pair(D, Loc));
- }
+ }
PendingInstantiations.clear();
}
void ASTReader::ReadLateParsedTemplates(
- llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> &LPTMap) {
+ llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>>
+ &LPTMap) {
for (unsigned Idx = 0, N = LateParsedTemplates.size(); Idx < N;
/* In loop */) {
FunctionDecl *FD = cast<FunctionDecl>(GetDecl(LateParsedTemplates[Idx++]));
- LateParsedTemplate *LT = new LateParsedTemplate;
+ auto LT = llvm::make_unique<LateParsedTemplate>();
LT->D = GetDecl(LateParsedTemplates[Idx++]);
ModuleFile *F = getOwningModuleFile(LT->D);
@@ -7388,7 +7607,7 @@ void ASTReader::ReadLateParsedTemplates(
for (unsigned T = 0; T < TokN; ++T)
LT->Toks.push_back(ReadToken(*F, LateParsedTemplates, Idx));
- LPTMap.insert(std::make_pair(FD, LT));
+ LPTMap.insert(std::make_pair(FD, std::move(LT)));
}
LateParsedTemplates.clear();
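The change above moves ownership of each LateParsedTemplate into the map itself. A minimal sketch of the same ownership pattern, with std::map standing in for llvm::MapVector and a hypothetical payload type:

    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    // Hypothetical payload; the real LateParsedTemplate also carries the
    // saved token stream.
    struct LateParsedTemplateStub { std::string Name; };

    int main() {
      // The map owns each entry via unique_ptr, so destroying the map
      // early cannot leak a template.
      std::map<int, std::unique_ptr<LateParsedTemplateStub>> LPTMap;

      auto LT = std::make_unique<LateParsedTemplateStub>();
      LT->Name = "f<T>";
      LPTMap.insert(std::make_pair(1, std::move(LT)));

      assert(LPTMap[1]->Name == "f<T>");
      return 0;
    }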
@@ -7498,12 +7717,12 @@ IdentifierInfo *ASTReader::getLocalIdentifier(ModuleFile &M, unsigned LocalID) {
IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, unsigned LocalID) {
if (LocalID < NUM_PREDEF_IDENT_IDS)
return LocalID;
-
+
ContinuousRangeMap<uint32_t, int, 2>::iterator I
= M.IdentifierRemap.find(LocalID - NUM_PREDEF_IDENT_IDS);
- assert(I != M.IdentifierRemap.end()
+ assert(I != M.IdentifierRemap.end()
&& "Invalid index into identifier index remap");
-
+
return LocalID + I->second;
}
@@ -7524,7 +7743,7 @@ MacroInfo *ASTReader::getMacro(MacroID ID) {
ModuleFile *M = I->second;
unsigned Index = ID - M->BaseMacroID;
MacrosLoaded[ID] = ReadMacroRecord(*M, M->MacroOffsets[Index]);
-
+
if (DeserializationListener)
DeserializationListener->MacroRead(ID + NUM_PREDEF_MACRO_IDS,
MacrosLoaded[ID]);
@@ -7548,12 +7767,12 @@ serialization::SubmoduleID
ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) {
if (LocalID < NUM_PREDEF_SUBMODULE_IDS)
return LocalID;
-
+
ContinuousRangeMap<uint32_t, int, 2>::iterator I
= M.SubmoduleRemap.find(LocalID - NUM_PREDEF_SUBMODULE_IDS);
- assert(I != M.SubmoduleRemap.end()
+ assert(I != M.SubmoduleRemap.end()
&& "Invalid index into submodule index remap");
-
+
return LocalID + I->second;
}
@@ -7562,12 +7781,12 @@ Module *ASTReader::getSubmodule(SubmoduleID GlobalID) {
assert(GlobalID == 0 && "Unhandled global submodule ID");
return nullptr;
}
-
+
if (GlobalID > SubmodulesLoaded.size()) {
Error("submodule ID out of range in AST file");
return nullptr;
}
-
+
return SubmodulesLoaded[GlobalID - NUM_PREDEF_SUBMODULE_IDS];
}
@@ -7664,17 +7883,17 @@ serialization::SelectorID
ASTReader::getGlobalSelectorID(ModuleFile &M, unsigned LocalID) const {
if (LocalID < NUM_PREDEF_SELECTOR_IDS)
return LocalID;
-
+
ContinuousRangeMap<uint32_t, int, 2>::iterator I
= M.SelectorRemap.find(LocalID - NUM_PREDEF_SELECTOR_IDS);
- assert(I != M.SelectorRemap.end()
+ assert(I != M.SelectorRemap.end()
&& "Invalid index into selector index remap");
-
+
return LocalID + I->second;
}
DeclarationName
-ASTReader::ReadDeclarationName(ModuleFile &F,
+ASTReader::ReadDeclarationName(ModuleFile &F,
const RecordData &Record, unsigned &Idx) {
DeclarationName::NameKind Kind = (DeclarationName::NameKind)Record[Idx++];
switch (Kind) {
@@ -7762,13 +7981,13 @@ void ASTReader::ReadQualifierInfo(ModuleFile &F, QualifierInfo &Info,
Info.NumTemplParamLists = NumTPLists;
if (NumTPLists) {
Info.TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
- for (unsigned i=0; i != NumTPLists; ++i)
+ for (unsigned i = 0; i != NumTPLists; ++i)
Info.TemplParamLists[i] = ReadTemplateParameterList(F, Record, Idx);
}
}
TemplateName
-ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
+ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
unsigned &Idx) {
TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
switch (Kind) {
@@ -7795,7 +8014,7 @@ ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
if (Record[Idx++]) // isIdentifier
return Context.getDependentTemplateName(NNS,
- GetIdentifierInfo(F, Record,
+ GetIdentifierInfo(F, Record,
Idx));
return Context.getDependentTemplateName(NNS,
(OverloadedOperatorKind)Record[Idx++]);
@@ -7808,17 +8027,17 @@ ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
TemplateName replacement = ReadTemplateName(F, Record, Idx);
return Context.getSubstTemplateTemplateParm(param, replacement);
}
-
+
case TemplateName::SubstTemplateTemplateParmPack: {
- TemplateTemplateParmDecl *Param
+ TemplateTemplateParmDecl *Param
= ReadDeclAs<TemplateTemplateParmDecl>(F, Record, Idx);
if (!Param)
return TemplateName();
-
+
TemplateArgument ArgPack = ReadTemplateArgument(F, Record, Idx);
if (ArgPack.getKind() != TemplateArgument::Pack)
return TemplateName();
-
+
return Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
}
}
@@ -7856,7 +8075,7 @@ TemplateArgument ASTReader::ReadTemplateArgument(ModuleFile &F,
QualType T = readType(F, Record, Idx);
return TemplateArgument(Context, Value, T);
}
- case TemplateArgument::Template:
+ case TemplateArgument::Template:
return TemplateArgument(ReadTemplateName(F, Record, Idx));
case TemplateArgument::TemplateExpansion: {
TemplateName Name = ReadTemplateName(F, Record, Idx);
@@ -7892,9 +8111,10 @@ ASTReader::ReadTemplateParameterList(ModuleFile &F,
while (NumParams--)
Params.push_back(ReadDeclAs<NamedDecl>(F, Record, Idx));
+ // TODO: Concepts
TemplateParameterList* TemplateParams =
TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
- Params, RAngleLoc);
+ Params, RAngleLoc, nullptr);
return TemplateParams;
}
@@ -7931,7 +8151,7 @@ ASTReader::ReadCXXBaseSpecifier(ModuleFile &F,
TypeSourceInfo *TInfo = GetTypeSourceInfo(F, Record, Idx);
SourceRange Range = ReadSourceRange(F, Record, Idx);
SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Idx);
- CXXBaseSpecifier Result(Range, isVirtual, isBaseOfClass, AS, TInfo,
+ CXXBaseSpecifier Result(Range, isVirtual, isBaseOfClass, AS, TInfo,
EllipsisLoc);
Result.setInheritConstructors(inheritConstructors);
return Result;
@@ -7973,49 +8193,29 @@ ASTReader::ReadCXXCtorInitializers(ModuleFile &F, const RecordData &Record,
Expr *Init = ReadExpr(F);
SourceLocation LParenLoc = ReadSourceLocation(F, Record, Idx);
SourceLocation RParenLoc = ReadSourceLocation(F, Record, Idx);
- bool IsWritten = Record[Idx++];
- unsigned SourceOrderOrNumArrayIndices;
- SmallVector<VarDecl *, 8> Indices;
- if (IsWritten) {
- SourceOrderOrNumArrayIndices = Record[Idx++];
- } else {
- SourceOrderOrNumArrayIndices = Record[Idx++];
- Indices.reserve(SourceOrderOrNumArrayIndices);
- for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i)
- Indices.push_back(ReadDeclAs<VarDecl>(F, Record, Idx));
- }
CXXCtorInitializer *BOMInit;
- if (Type == CTOR_INITIALIZER_BASE) {
+ if (Type == CTOR_INITIALIZER_BASE)
BOMInit = new (Context)
CXXCtorInitializer(Context, TInfo, IsBaseVirtual, LParenLoc, Init,
RParenLoc, MemberOrEllipsisLoc);
- } else if (Type == CTOR_INITIALIZER_DELEGATING) {
+ else if (Type == CTOR_INITIALIZER_DELEGATING)
BOMInit = new (Context)
CXXCtorInitializer(Context, TInfo, LParenLoc, Init, RParenLoc);
- } else if (IsWritten) {
- if (Member)
- BOMInit = new (Context) CXXCtorInitializer(
- Context, Member, MemberOrEllipsisLoc, LParenLoc, Init, RParenLoc);
- else
- BOMInit = new (Context)
- CXXCtorInitializer(Context, IndirectMember, MemberOrEllipsisLoc,
- LParenLoc, Init, RParenLoc);
- } else {
- if (IndirectMember) {
- assert(Indices.empty() && "Indirect field improperly initialized");
- BOMInit = new (Context)
- CXXCtorInitializer(Context, IndirectMember, MemberOrEllipsisLoc,
- LParenLoc, Init, RParenLoc);
- } else {
- BOMInit = CXXCtorInitializer::Create(
- Context, Member, MemberOrEllipsisLoc, LParenLoc, Init, RParenLoc,
- Indices.data(), Indices.size());
- }
+ else if (Member)
+ BOMInit = new (Context)
+ CXXCtorInitializer(Context, Member, MemberOrEllipsisLoc, LParenLoc,
+ Init, RParenLoc);
+ else
+ BOMInit = new (Context)
+ CXXCtorInitializer(Context, IndirectMember, MemberOrEllipsisLoc,
+ LParenLoc, Init, RParenLoc);
+
+ if (/*IsWritten*/Record[Idx++]) {
+ unsigned SourceOrder = Record[Idx++];
+ BOMInit->setSourceOrder(SourceOrder);
}
- if (IsWritten)
- BOMInit->setSourceOrder(SourceOrderOrNumArrayIndices);
CtorInitializers[i] = BOMInit;
}
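The rewritten loop above also simplifies the record tail: the IsWritten flag is now read after the initializer is constructed, followed by the source order only when the flag is set. A standalone sketch of decoding that tail from a flat record:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      // Hypothetical tail of a ctor-initializer record after this change:
      // an IsWritten flag, then the source order only when the flag is set.
      std::vector<uint64_t> Record = {/*IsWritten=*/1, /*SourceOrder=*/3};
      unsigned Idx = 0;

      int SourceOrder = -1;
      if (Record[Idx++])                       // IsWritten
        SourceOrder = static_cast<int>(Record[Idx++]);

      assert(SourceOrder == 3);
      return 0;
    }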
@@ -8078,7 +8278,7 @@ ASTReader::ReadNestedNameSpecifier(ModuleFile &F,
}
NestedNameSpecifierLoc
-ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
+ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
unsigned &Idx) {
unsigned N = Record[Idx++];
NestedNameSpecifierLocBuilder Builder;
@@ -8087,7 +8287,7 @@ ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
= (NestedNameSpecifier::SpecifierKind)Record[Idx++];
switch (Kind) {
case NestedNameSpecifier::Identifier: {
- IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
+ IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
SourceRange Range = ReadSourceRange(F, Record, Idx);
Builder.Extend(Context, II, Range.getBegin(), Range.getEnd());
break;
@@ -8116,7 +8316,7 @@ ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
SourceLocation ColonColonLoc = ReadSourceLocation(F, Record, Idx);
// FIXME: 'template' keyword location not saved anywhere, so we fake it.
- Builder.Extend(Context,
+ Builder.Extend(Context,
Template? T->getTypeLoc().getBeginLoc() : SourceLocation(),
T->getTypeLoc(), ColonColonLoc);
break;
@@ -8185,7 +8385,7 @@ std::string ASTReader::ReadPath(ModuleFile &F, const RecordData &Record,
return Filename;
}
-VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
+VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
unsigned &Idx) {
unsigned Major = Record[Idx++];
unsigned Minor = Record[Idx++];
@@ -8197,7 +8397,7 @@ VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
return VersionTuple(Major, Minor - 1, Subminor - 1);
}
-CXXTemporary *ASTReader::ReadCXXTemporary(ModuleFile &F,
+CXXTemporary *ASTReader::ReadCXXTemporary(ModuleFile &F,
const RecordData &Record,
unsigned &Idx) {
CXXDestructorDecl *Decl = ReadDeclAs<CXXDestructorDecl>(F, Record, Idx);
@@ -8283,6 +8483,10 @@ void ASTReader::ReadComments() {
}
}
NextCursor:
+ // De-serialized SourceLocations get negative FileIDs for other modules,
+ // potentially invalidating the original order. Sort it again.
+ std::sort(Comments.begin(), Comments.end(),
+ BeforeThanCompare<RawComment>(SourceMgr));
Context.Comments.addDeserializedComments(Comments);
}
}
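The inserted std::sort restores source order after deserialization. A minimal sketch of the same idea, with a hypothetical offset field standing in for the real SourceLocations that BeforeThanCompare<RawComment> consults:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Hypothetical comment stub; a plain offset stands in for a
    // SourceLocation.
    struct RawCommentStub { unsigned Offset; };

    int main() {
      // Deserialization can hand comments back out of order; re-sort them
      // by position before registering them, as the hunk above does.
      std::vector<RawCommentStub> Comments = {{30}, {10}, {20}};
      std::sort(Comments.begin(), Comments.end(),
                [](const RawCommentStub &A, const RawCommentStub &B) {
                  return A.Offset < B.Offset;
                });
      assert(Comments.front().Offset == 10 && Comments.back().Offset == 30);
      return 0;
    }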
@@ -8350,16 +8554,14 @@ void ASTReader::finishPendingActions() {
for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
++IDIdx) {
const PendingMacroInfo &Info = GlobalIDs[IDIdx];
- if (Info.M->Kind != MK_ImplicitModule &&
- Info.M->Kind != MK_ExplicitModule)
+ if (!Info.M->isModule())
resolvePendingMacro(II, Info);
}
// Handle module imports.
for (unsigned IDIdx = 0, NumIDs = GlobalIDs.size(); IDIdx != NumIDs;
++IDIdx) {
const PendingMacroInfo &Info = GlobalIDs[IDIdx];
- if (Info.M->Kind == MK_ImplicitModule ||
- Info.M->Kind == MK_ExplicitModule)
+ if (Info.M->isModule())
resolvePendingMacro(II, Info);
}
}
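Both loops above now share a single isModule() predicate instead of repeating the kind comparisons. A sketch of that refactoring pattern, with hypothetical kinds:

    // Hypothetical module kinds; the real enum lives in the serialization
    // headers.
    enum ModuleKind { MK_ImplicitModule, MK_ExplicitModule, MK_PCH };

    struct ModuleFileStub {
      ModuleKind Kind;

      // One predicate replaces the repeated kind comparisons removed above.
      bool isModule() const {
        return Kind == MK_ImplicitModule || Kind == MK_ExplicitModule;
      }
    };

    int main() {
      ModuleFileStub F{MK_PCH};
      return F.isModule() ? 1 : 0;  // a PCH file is not a module
    }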
@@ -8390,7 +8592,7 @@ void ASTReader::finishPendingActions() {
// If we deserialized any C++ or Objective-C class definitions, any
// Objective-C protocol definitions, or any redeclarable templates, make sure
- // that all redeclarations point to the definitions. Note that this can only
+ // that all redeclarations point to the definitions. Note that this can only
// happen now, after the redeclaration chains have been fully wired.
for (Decl *D : PendingDefinitions) {
if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
@@ -8446,8 +8648,11 @@ void ASTReader::finishPendingActions() {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(PB->first)) {
// FIXME: Check for =delete/=default?
// FIXME: Complain about ODR violations here?
- if (!getContext().getLangOpts().Modules || !FD->hasBody())
+ const FunctionDecl *Defn = nullptr;
+ if (!getContext().getLangOpts().Modules || !FD->hasBody(Defn))
FD->setLazyBody(PB->second);
+ else
+ mergeDefinitionVisibility(const_cast<FunctionDecl*>(Defn), FD);
continue;
}
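The new code above uses the out-parameter form of FunctionDecl::hasBody to learn which redeclaration owns the body before merging visibility. A minimal sketch of that query shape, with hypothetical stubs:

    #include <cassert>

    // Hypothetical stand-in for the hasBody(Defn) out-parameter query:
    // it reports both whether a body exists and which declaration owns it.
    struct FuncStub {
      const FuncStub *BodyOwner = nullptr;

      bool hasBody(const FuncStub *&Defn) const {
        Defn = BodyOwner;
        return BodyOwner != nullptr;
      }
    };

    int main() {
      FuncStub Def;                 // the redeclaration that owns the body
      FuncStub Other;
      Other.BodyOwner = &Def;

      const FuncStub *Defn = nullptr;
      assert(Other.hasBody(Defn) && Defn == &Def);
      return 0;
    }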
@@ -8542,7 +8747,7 @@ void ASTReader::diagnoseOdrViolations() {
// completed. We only really need to mark FieldDecls as invalid here.
if (!isa<TagDecl>(D))
D->setInvalidDecl();
-
+
// Ensure we don't accidentally recursively enter deserialization while
// we're producing our diagnostic.
Deserializing RecursionGuard(this);
@@ -8617,7 +8822,7 @@ void ASTReader::diagnoseOdrViolations() {
}
void ASTReader::StartedDeserializing() {
- if (++NumCurrentElementsDeserializing == 1 && ReadTimer.get())
+ if (++NumCurrentElementsDeserializing == 1 && ReadTimer.get())
ReadTimer->startTimer();
}
@@ -8694,7 +8899,10 @@ ASTReader::ASTReader(
bool AllowConfigurationMismatch, bool ValidateSystemInputs,
bool UseGlobalIndex,
std::unique_ptr<llvm::Timer> ReadTimer)
- : Listener(new PCHValidator(PP, *this)), DeserializationListener(nullptr),
+ : Listener(DisableValidation ?
+ cast<ASTReaderListener>(new SimpleASTReaderListener(PP)) :
+ cast<ASTReaderListener>(new PCHValidator(PP, *this))),
+ DeserializationListener(nullptr),
OwnsDeserializationListener(false), SourceMgr(PP.getSourceManager()),
FileMgr(PP.getFileManager()), PCHContainerRdr(PCHContainerRdr),
Diags(PP.getDiagnostics()), SemaObj(nullptr), PP(PP), Context(Context),
@@ -8743,3 +8951,10 @@ ASTReader::~ASTReader() {
IdentifierResolver &ASTReader::getIdResolver() {
return SemaObj ? SemaObj->IdResolver : DummyIdResolver;
}
+
+unsigned ASTRecordReader::readRecord(llvm::BitstreamCursor &Cursor,
+ unsigned AbbrevID) {
+ Idx = 0;
+ Record.clear();
+ return Cursor.readRecord(AbbrevID, Record);
+}
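The new ASTRecordReader::readRecord resets the read index and record buffer before refilling them from the bitstream cursor. A standalone sketch of that reset-then-refill pattern, with a vector standing in for the cursor:

    #include <cstdint>
    #include <vector>

    // Minimal stand-in for readRecord's behavior: Idx rewinds to 0 and
    // Record is repopulated, so no state from the previous record can
    // leak into the next one.
    struct RecordReaderStub {
      std::vector<uint64_t> Record;
      unsigned Idx = 0;

      unsigned readRecord(const std::vector<uint64_t> &Next) {
        Idx = 0;
        Record.clear();
        Record = Next;               // stands in for Cursor.readRecord
        return static_cast<unsigned>(Record.size());
      }

      uint64_t readInt() { return Record[Idx++]; }
    };

    int main() {
      RecordReaderStub R;
      R.readRecord({7, 8});
      return R.readInt() == 7 ? 0 : 1;
    }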
diff --git a/lib/Serialization/ASTReaderDecl.cpp b/lib/Serialization/ASTReaderDecl.cpp
index 35da8f3ebcfe..6e18b208a9ae 100644
--- a/lib/Serialization/ASTReaderDecl.cpp
+++ b/lib/Serialization/ASTReaderDecl.cpp
@@ -12,10 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Serialization/ASTReader.h"
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
-#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
@@ -24,6 +22,7 @@
#include "clang/AST/Expr.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Serialization/ASTReader.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
@@ -36,13 +35,11 @@ using namespace clang::serialization;
namespace clang {
class ASTDeclReader : public DeclVisitor<ASTDeclReader, void> {
ASTReader &Reader;
- ModuleFile &F;
- uint64_t Offset;
+ ASTRecordReader &Record;
+ ASTReader::RecordLocation Loc;
const DeclID ThisDeclID;
const SourceLocation ThisDeclLoc;
typedef ASTReader::RecordData RecordData;
- const RecordData &Record;
- unsigned &Idx;
TypeID TypeIDForTypeDecl;
unsigned AnonymousDeclNumber;
GlobalDeclID NamedDeclForTagDecl;
@@ -57,83 +54,77 @@ namespace clang {
uint64_t GetCurrentCursorOffset();
- uint64_t ReadLocalOffset(const RecordData &R, unsigned &I) {
- uint64_t LocalOffset = R[I++];
-      assert(LocalOffset < Offset && "offset points after current record");
- return LocalOffset ? Offset - LocalOffset : 0;
+ uint64_t ReadLocalOffset() {
+ uint64_t LocalOffset = Record.readInt();
+      assert(LocalOffset < Loc.Offset && "offset points after current record");
+ return LocalOffset ? Loc.Offset - LocalOffset : 0;
}
- uint64_t ReadGlobalOffset(ModuleFile &F, const RecordData &R, unsigned &I) {
- uint64_t Local = ReadLocalOffset(R, I);
- return Local ? Reader.getGlobalBitOffset(F, Local) : 0;
+ uint64_t ReadGlobalOffset() {
+ uint64_t Local = ReadLocalOffset();
+ return Local ? Record.getGlobalBitOffset(Local) : 0;
}
- SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
- return Reader.ReadSourceLocation(F, R, I);
+ SourceLocation ReadSourceLocation() {
+ return Record.readSourceLocation();
}
- SourceRange ReadSourceRange(const RecordData &R, unsigned &I) {
- return Reader.ReadSourceRange(F, R, I);
+ SourceRange ReadSourceRange() {
+ return Record.readSourceRange();
}
- TypeSourceInfo *GetTypeSourceInfo(const RecordData &R, unsigned &I) {
- return Reader.GetTypeSourceInfo(F, R, I);
+ TypeSourceInfo *GetTypeSourceInfo() {
+ return Record.getTypeSourceInfo();
}
- serialization::DeclID ReadDeclID(const RecordData &R, unsigned &I) {
- return Reader.ReadDeclID(F, R, I);
+ serialization::DeclID ReadDeclID() {
+ return Record.readDeclID();
}
- std::string ReadString(const RecordData &R, unsigned &I) {
- return Reader.ReadString(R, I);
+ std::string ReadString() {
+ return Record.readString();
}
void ReadDeclIDList(SmallVectorImpl<DeclID> &IDs) {
- for (unsigned I = 0, Size = Record[Idx++]; I != Size; ++I)
- IDs.push_back(ReadDeclID(Record, Idx));
+ for (unsigned I = 0, Size = Record.readInt(); I != Size; ++I)
+ IDs.push_back(ReadDeclID());
}
- Decl *ReadDecl(const RecordData &R, unsigned &I) {
- return Reader.ReadDecl(F, R, I);
+ Decl *ReadDecl() {
+ return Record.readDecl();
}
template<typename T>
- T *ReadDeclAs(const RecordData &R, unsigned &I) {
- return Reader.ReadDeclAs<T>(F, R, I);
+ T *ReadDeclAs() {
+ return Record.readDeclAs<T>();
}
- void ReadQualifierInfo(QualifierInfo &Info,
- const RecordData &R, unsigned &I) {
- Reader.ReadQualifierInfo(F, Info, R, I);
- }
-
- void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
- const RecordData &R, unsigned &I) {
- Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ void ReadQualifierInfo(QualifierInfo &Info) {
+ Record.readQualifierInfo(Info);
}
-
- void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
- const RecordData &R, unsigned &I) {
- Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name) {
+ Record.readDeclarationNameLoc(DNLoc, Name);
}
- serialization::SubmoduleID readSubmoduleID(const RecordData &R,
- unsigned &I) {
- if (I >= R.size())
+ serialization::SubmoduleID readSubmoduleID() {
+ if (Record.getIdx() == Record.size())
return 0;
-
- return Reader.getGlobalSubmoduleID(F, R[I++]);
+
+ return Record.getGlobalSubmoduleID(Record.readInt());
}
-
- Module *readModule(const RecordData &R, unsigned &I) {
- return Reader.getSubmodule(readSubmoduleID(R, I));
+
+ Module *readModule() {
+ return Record.getSubmodule(readSubmoduleID());
}
void ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update);
- void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data,
- const RecordData &R, unsigned &I);
+ void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data);
void MergeDefinitionData(CXXRecordDecl *D,
struct CXXRecordDecl::DefinitionData &&NewDD);
+ void ReadObjCDefinitionData(struct ObjCInterfaceDecl::DefinitionData &Data);
+ void MergeDefinitionData(ObjCInterfaceDecl *D,
+ struct ObjCInterfaceDecl::DefinitionData &&NewDD);
static NamedDecl *getAnonymousDeclForMerging(ASTReader &Reader,
DeclContext *DC,
@@ -143,13 +134,13 @@ namespace clang {
/// Results from loading a RedeclarableDecl.
class RedeclarableResult {
- GlobalDeclID FirstID;
Decl *MergeWith;
+ GlobalDeclID FirstID;
bool IsKeyDecl;
public:
- RedeclarableResult(GlobalDeclID FirstID, Decl *MergeWith, bool IsKeyDecl)
- : FirstID(FirstID), MergeWith(MergeWith), IsKeyDecl(IsKeyDecl) {}
+ RedeclarableResult(Decl *MergeWith, GlobalDeclID FirstID, bool IsKeyDecl)
+ : MergeWith(MergeWith), FirstID(FirstID), IsKeyDecl(IsKeyDecl) {}
/// \brief Retrieve the first ID.
GlobalDeclID getFirstID() const { return FirstID; }
@@ -171,12 +162,12 @@ namespace clang {
ASTReader &Reader;
NamedDecl *New;
NamedDecl *Existing;
- mutable bool AddResult;
+ bool AddResult;
unsigned AnonymousDeclNumber;
IdentifierInfo *TypedefNameForLinkage;
- void operator=(FindExistingResult&) = delete;
+ void operator=(FindExistingResult &&) = delete;
public:
FindExistingResult(ASTReader &Reader)
@@ -190,7 +181,7 @@ namespace clang {
AnonymousDeclNumber(AnonymousDeclNumber),
TypedefNameForLinkage(TypedefNameForLinkage) {}
- FindExistingResult(const FindExistingResult &Other)
+ FindExistingResult(FindExistingResult &&Other)
: Reader(Other.Reader), New(Other.New), Existing(Other.Existing),
AddResult(Other.AddResult),
AnonymousDeclNumber(Other.AnonymousDeclNumber),
@@ -215,11 +206,11 @@ namespace clang {
FindExistingResult findExisting(NamedDecl *D);
public:
- ASTDeclReader(ASTReader &Reader, ASTReader::RecordLocation Loc,
- DeclID thisDeclID, SourceLocation ThisDeclLoc,
- const RecordData &Record, unsigned &Idx)
- : Reader(Reader), F(*Loc.F), Offset(Loc.Offset), ThisDeclID(thisDeclID),
- ThisDeclLoc(ThisDeclLoc), Record(Record), Idx(Idx),
+ ASTDeclReader(ASTReader &Reader, ASTRecordReader &Record,
+ ASTReader::RecordLocation Loc,
+ DeclID thisDeclID, SourceLocation ThisDeclLoc)
+ : Reader(Reader), Record(Record), Loc(Loc),
+ ThisDeclID(thisDeclID), ThisDeclLoc(ThisDeclLoc),
TypeIDForTypeDecl(0), NamedDeclForTagDecl(0),
TypedefNameForLinkage(nullptr), HasPendingBody(false),
IsDeclMarkedUsed(false) {}
@@ -251,8 +242,7 @@ namespace clang {
void Visit(Decl *D);
- void UpdateDecl(Decl *D, ModuleFile &ModuleFile,
- const RecordData &Record);
+ void UpdateDecl(Decl *D);
static void setNextObjCCategory(ObjCCategoryDecl *Cat,
ObjCCategoryDecl *Next) {
@@ -313,6 +303,8 @@ namespace clang {
void VisitVarDecl(VarDecl *VD) { VisitVarDeclImpl(VD); }
void VisitImplicitParamDecl(ImplicitParamDecl *PD);
void VisitParmVarDecl(ParmVarDecl *PD);
+ void VisitDecompositionDecl(DecompositionDecl *DD);
+ void VisitBindingDecl(BindingDecl *BD);
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
DeclID VisitTemplateDecl(TemplateDecl *D);
RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
@@ -323,9 +315,11 @@ namespace clang {
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingPackDecl(UsingPackDecl *D);
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitConstructorUsingShadowDecl(ConstructorUsingShadowDecl *D);
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitExportDecl(ExportDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD);
void VisitImportDecl(ImportDecl *D);
void VisitAccessSpecDecl(AccessSpecDecl *D);
@@ -377,27 +371,6 @@ namespace clang {
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
void VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D);
void VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D);
-
- /// We've merged the definition \p MergedDef into the existing definition
- /// \p Def. Ensure that \p Def is made visible whenever \p MergedDef is made
- /// visible.
- void mergeDefinitionVisibility(NamedDecl *Def, NamedDecl *MergedDef) {
- if (Def->isHidden()) {
- // If MergedDef is visible or becomes visible, make the definition visible.
- if (!MergedDef->isHidden())
- Def->Hidden = false;
- else if (Reader.getContext().getLangOpts().ModulesLocalVisibility) {
- Reader.getContext().mergeDefinitionIntoModule(
- Def, MergedDef->getImportedOwningModule(),
- /*NotifyListeners*/ false);
- Reader.PendingMergedDefinitionsToDeduplicate.insert(Def);
- } else {
- auto SubmoduleID = MergedDef->getOwningModuleID();
- assert(SubmoduleID && "hidden definition in no module");
- Reader.HiddenNamesMap[Reader.getSubmodule(SubmoduleID)].push_back(Def);
- }
- }
- }
};
} // end namespace clang
@@ -437,14 +410,15 @@ public:
};
} // end anonymous namespace
-template<typename DeclT>
-llvm::iterator_range<MergedRedeclIterator<DeclT>> merged_redecls(DeclT *D) {
+template <typename DeclT>
+static llvm::iterator_range<MergedRedeclIterator<DeclT>>
+merged_redecls(DeclT *D) {
return llvm::make_range(MergedRedeclIterator<DeclT>(D),
MergedRedeclIterator<DeclT>());
}
uint64_t ASTDeclReader::GetCurrentCursorOffset() {
- return F.DeclsCursor.GetCurrentBitNo() + F.GlobalBitOffset;
+ return Loc.F->DeclsCursor.GetCurrentBitNo() + Loc.F->GlobalBitOffset;
}
void ASTDeclReader::Visit(Decl *D) {
@@ -459,11 +433,10 @@ void ASTDeclReader::Visit(Decl *D) {
if (DD->DeclInfo) {
DeclaratorDecl::ExtInfo *Info =
DD->DeclInfo.get<DeclaratorDecl::ExtInfo *>();
- Info->TInfo =
- GetTypeSourceInfo(Record, Idx);
+ Info->TInfo = GetTypeSourceInfo();
}
else {
- DD->DeclInfo = GetTypeSourceInfo(Record, Idx);
+ DD->DeclInfo = GetTypeSourceInfo();
}
}
@@ -484,11 +457,11 @@ void ASTDeclReader::Visit(Decl *D) {
// We only read it if FD doesn't already have a body (e.g., from another
// module).
// FIXME: Can we diagnose ODR violations somehow?
- if (Record[Idx++]) {
+ if (Record.readInt()) {
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- CD->NumCtorInitializers = Record[Idx++];
+ CD->NumCtorInitializers = Record.readInt();
if (CD->NumCtorInitializers)
- CD->CtorInitializers = ReadGlobalOffset(F, Record, Idx);
+ CD->CtorInitializers = ReadGlobalOffset();
}
Reader.PendingBodies[FD] = GetCurrentCursorOffset();
HasPendingBody = true;
@@ -505,8 +478,8 @@ void ASTDeclReader::VisitDecl(Decl *D) {
// example, a function parameter can be used in decltype() in trailing
// return type of the function). Use the translation unit DeclContext as a
// placeholder.
- GlobalDeclID SemaDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
- GlobalDeclID LexicalDCIDForTemplateParmDecl = ReadDeclID(Record, Idx);
+ GlobalDeclID SemaDCIDForTemplateParmDecl = ReadDeclID();
+ GlobalDeclID LexicalDCIDForTemplateParmDecl = ReadDeclID();
if (!LexicalDCIDForTemplateParmDecl)
LexicalDCIDForTemplateParmDecl = SemaDCIDForTemplateParmDecl;
Reader.addPendingDeclContextInfo(D,
@@ -514,8 +487,8 @@ void ASTDeclReader::VisitDecl(Decl *D) {
LexicalDCIDForTemplateParmDecl);
D->setDeclContext(Reader.getContext().getTranslationUnitDecl());
} else {
- DeclContext *SemaDC = ReadDeclAs<DeclContext>(Record, Idx);
- DeclContext *LexicalDC = ReadDeclAs<DeclContext>(Record, Idx);
+ DeclContext *SemaDC = ReadDeclAs<DeclContext>();
+ DeclContext *LexicalDC = ReadDeclAs<DeclContext>();
if (!LexicalDC)
LexicalDC = SemaDC;
DeclContext *MergedSemaDC = Reader.MergedDeclContexts.lookup(SemaDC);
@@ -525,27 +498,27 @@ void ASTDeclReader::VisitDecl(Decl *D) {
Reader.getContext());
}
D->setLocation(ThisDeclLoc);
- D->setInvalidDecl(Record[Idx++]);
- if (Record[Idx++]) { // hasAttrs
+ D->setInvalidDecl(Record.readInt());
+ if (Record.readInt()) { // hasAttrs
AttrVec Attrs;
- Reader.ReadAttributes(F, Attrs, Record, Idx);
+ Record.readAttributes(Attrs);
// Avoid calling setAttrs() directly because it uses Decl::getASTContext()
    // internally, which is unsafe during deserialization.
D->setAttrsImpl(Attrs, Reader.getContext());
}
- D->setImplicit(Record[Idx++]);
- D->Used = Record[Idx++];
+ D->setImplicit(Record.readInt());
+ D->Used = Record.readInt();
IsDeclMarkedUsed |= D->Used;
- D->setReferenced(Record[Idx++]);
- D->setTopLevelDeclInObjCContainer(Record[Idx++]);
- D->setAccess((AccessSpecifier)Record[Idx++]);
+ D->setReferenced(Record.readInt());
+ D->setTopLevelDeclInObjCContainer(Record.readInt());
+ D->setAccess((AccessSpecifier)Record.readInt());
D->FromASTFile = true;
- D->setModulePrivate(Record[Idx++]);
+ D->setModulePrivate(Record.readInt());
D->Hidden = D->isModulePrivate();
// Determine whether this declaration is part of a (sub)module. If so, it
// may not yet be visible.
- if (unsigned SubmoduleID = readSubmoduleID(Record, Idx)) {
+ if (unsigned SubmoduleID = readSubmoduleID()) {
// Store the owning submodule ID in the declaration.
D->setOwningModuleID(SubmoduleID);
@@ -571,22 +544,22 @@ void ASTDeclReader::VisitDecl(Decl *D) {
void ASTDeclReader::VisitPragmaCommentDecl(PragmaCommentDecl *D) {
VisitDecl(D);
- D->setLocation(ReadSourceLocation(Record, Idx));
- D->CommentKind = (PragmaMSCommentKind)Record[Idx++];
- std::string Arg = ReadString(Record, Idx);
+ D->setLocation(ReadSourceLocation());
+ D->CommentKind = (PragmaMSCommentKind)Record.readInt();
+ std::string Arg = ReadString();
memcpy(D->getTrailingObjects<char>(), Arg.data(), Arg.size());
D->getTrailingObjects<char>()[Arg.size()] = '\0';
}
void ASTDeclReader::VisitPragmaDetectMismatchDecl(PragmaDetectMismatchDecl *D) {
VisitDecl(D);
- D->setLocation(ReadSourceLocation(Record, Idx));
- std::string Name = ReadString(Record, Idx);
+ D->setLocation(ReadSourceLocation());
+ std::string Name = ReadString();
memcpy(D->getTrailingObjects<char>(), Name.data(), Name.size());
D->getTrailingObjects<char>()[Name.size()] = '\0';
D->ValueStart = Name.size() + 1;
- std::string Value = ReadString(Record, Idx);
+ std::string Value = ReadString();
memcpy(D->getTrailingObjects<char>() + D->ValueStart, Value.data(),
Value.size());
D->getTrailingObjects<char>()[D->ValueStart + Value.size()] = '\0';
@@ -598,24 +571,24 @@ void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) {
void ASTDeclReader::VisitNamedDecl(NamedDecl *ND) {
VisitDecl(ND);
- ND->setDeclName(Reader.ReadDeclarationName(F, Record, Idx));
- AnonymousDeclNumber = Record[Idx++];
+ ND->setDeclName(Record.readDeclarationName());
+ AnonymousDeclNumber = Record.readInt();
}
void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
VisitNamedDecl(TD);
- TD->setLocStart(ReadSourceLocation(Record, Idx));
+ TD->setLocStart(ReadSourceLocation());
// Delay type reading until after we have fully initialized the decl.
- TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
+ TypeIDForTypeDecl = Record.getGlobalTypeID(Record.readInt());
}
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
RedeclarableResult Redecl = VisitRedeclarable(TD);
VisitTypeDecl(TD);
- TypeSourceInfo *TInfo = GetTypeSourceInfo(Record, Idx);
- if (Record[Idx++]) { // isModed
- QualType modedT = Reader.readType(F, Record, Idx);
+ TypeSourceInfo *TInfo = GetTypeSourceInfo();
+ if (Record.readInt()) { // isModed
+ QualType modedT = Record.readType();
TD->setModedTypeSourceInfo(TInfo, modedT);
} else
TD->setTypeSourceInfo(TInfo);
@@ -629,7 +602,7 @@ void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
void ASTDeclReader::VisitTypeAliasDecl(TypeAliasDecl *TD) {
RedeclarableResult Redecl = VisitTypedefNameDecl(TD);
- if (auto *Template = ReadDeclAs<TypeAliasTemplateDecl>(Record, Idx))
+ if (auto *Template = ReadDeclAs<TypeAliasTemplateDecl>())
// Merged when we merge the template.
TD->setDescribedAliasTemplate(Template);
else
@@ -640,27 +613,27 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
RedeclarableResult Redecl = VisitRedeclarable(TD);
VisitTypeDecl(TD);
- TD->IdentifierNamespace = Record[Idx++];
- TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
+ TD->IdentifierNamespace = Record.readInt();
+ TD->setTagKind((TagDecl::TagKind)Record.readInt());
if (!isa<CXXRecordDecl>(TD))
- TD->setCompleteDefinition(Record[Idx++]);
- TD->setEmbeddedInDeclarator(Record[Idx++]);
- TD->setFreeStanding(Record[Idx++]);
- TD->setCompleteDefinitionRequired(Record[Idx++]);
- TD->setBraceRange(ReadSourceRange(Record, Idx));
+ TD->setCompleteDefinition(Record.readInt());
+ TD->setEmbeddedInDeclarator(Record.readInt());
+ TD->setFreeStanding(Record.readInt());
+ TD->setCompleteDefinitionRequired(Record.readInt());
+ TD->setBraceRange(ReadSourceRange());
- switch (Record[Idx++]) {
+ switch (Record.readInt()) {
case 0:
break;
case 1: { // ExtInfo
TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo();
- ReadQualifierInfo(*Info, Record, Idx);
+ ReadQualifierInfo(*Info);
TD->TypedefNameDeclOrQualifier = Info;
break;
}
case 2: // TypedefNameForAnonDecl
- NamedDeclForTagDecl = ReadDeclID(Record, Idx);
- TypedefNameForLinkage = Reader.GetIdentifierInfo(F, Record, Idx);
+ NamedDeclForTagDecl = ReadDeclID();
+ TypedefNameForLinkage = Record.getIdentifierInfo();
break;
default:
llvm_unreachable("unexpected tag info kind");
@@ -673,16 +646,16 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitTagDecl(TagDecl *TD) {
void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
VisitTagDecl(ED);
- if (TypeSourceInfo *TI = Reader.GetTypeSourceInfo(F, Record, Idx))
+ if (TypeSourceInfo *TI = GetTypeSourceInfo())
ED->setIntegerTypeSourceInfo(TI);
else
- ED->setIntegerType(Reader.readType(F, Record, Idx));
- ED->setPromotionType(Reader.readType(F, Record, Idx));
- ED->setNumPositiveBits(Record[Idx++]);
- ED->setNumNegativeBits(Record[Idx++]);
- ED->IsScoped = Record[Idx++];
- ED->IsScopedUsingClassTag = Record[Idx++];
- ED->IsFixed = Record[Idx++];
+ ED->setIntegerType(Record.readType());
+ ED->setPromotionType(Record.readType());
+ ED->setNumPositiveBits(Record.readInt());
+ ED->setNumNegativeBits(Record.readInt());
+ ED->IsScoped = Record.readInt();
+ ED->IsScopedUsingClassTag = Record.readInt();
+ ED->IsFixed = Record.readInt();
// If this is a definition subject to the ODR, and we already have a
// definition, merge this one into it.
@@ -703,15 +676,16 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
if (OldDef) {
Reader.MergedDeclContexts.insert(std::make_pair(ED, OldDef));
ED->IsCompleteDefinition = false;
- mergeDefinitionVisibility(OldDef, ED);
+ Reader.mergeDefinitionVisibility(OldDef, ED);
} else {
OldDef = ED;
}
}
- if (EnumDecl *InstED = ReadDeclAs<EnumDecl>(Record, Idx)) {
- TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = ReadSourceLocation(Record, Idx);
+ if (EnumDecl *InstED = ReadDeclAs<EnumDecl>()) {
+ TemplateSpecializationKind TSK =
+ (TemplateSpecializationKind)Record.readInt();
+ SourceLocation POI = ReadSourceLocation();
ED->setInstantiationOfMemberEnum(Reader.getContext(), InstED, TSK);
ED->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
}
@@ -720,33 +694,33 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRecordDeclImpl(RecordDecl *RD) {
RedeclarableResult Redecl = VisitTagDecl(RD);
- RD->setHasFlexibleArrayMember(Record[Idx++]);
- RD->setAnonymousStructOrUnion(Record[Idx++]);
- RD->setHasObjectMember(Record[Idx++]);
- RD->setHasVolatileMember(Record[Idx++]);
+ RD->setHasFlexibleArrayMember(Record.readInt());
+ RD->setAnonymousStructOrUnion(Record.readInt());
+ RD->setHasObjectMember(Record.readInt());
+ RD->setHasVolatileMember(Record.readInt());
return Redecl;
}
void ASTDeclReader::VisitValueDecl(ValueDecl *VD) {
VisitNamedDecl(VD);
- VD->setType(Reader.readType(F, Record, Idx));
+ VD->setType(Record.readType());
}
void ASTDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
VisitValueDecl(ECD);
- if (Record[Idx++])
- ECD->setInitExpr(Reader.ReadExpr(F));
- ECD->setInitVal(Reader.ReadAPSInt(Record, Idx));
+ if (Record.readInt())
+ ECD->setInitExpr(Record.readExpr());
+ ECD->setInitVal(Record.readAPSInt());
mergeMergeable(ECD);
}
void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
VisitValueDecl(DD);
- DD->setInnerLocStart(ReadSourceLocation(Record, Idx));
- if (Record[Idx++]) { // hasExtInfo
+ DD->setInnerLocStart(ReadSourceLocation());
+ if (Record.readInt()) { // hasExtInfo
DeclaratorDecl::ExtInfo *Info
= new (Reader.getContext()) DeclaratorDecl::ExtInfo();
- ReadQualifierInfo(*Info, Record, Idx);
+ ReadQualifierInfo(*Info);
DD->DeclInfo = Info;
}
}
@@ -755,74 +729,72 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
RedeclarableResult Redecl = VisitRedeclarable(FD);
VisitDeclaratorDecl(FD);
- ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName(), Record, Idx);
- FD->IdentifierNamespace = Record[Idx++];
-
+ ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName());
+ FD->IdentifierNamespace = Record.readInt();
+
// FunctionDecl's body is handled last at ASTDeclReader::Visit,
// after everything else is read.
- FD->SClass = (StorageClass)Record[Idx++];
- FD->IsInline = Record[Idx++];
- FD->IsInlineSpecified = Record[Idx++];
- FD->IsVirtualAsWritten = Record[Idx++];
- FD->IsPure = Record[Idx++];
- FD->HasInheritedPrototype = Record[Idx++];
- FD->HasWrittenPrototype = Record[Idx++];
- FD->IsDeleted = Record[Idx++];
- FD->IsTrivial = Record[Idx++];
- FD->IsDefaulted = Record[Idx++];
- FD->IsExplicitlyDefaulted = Record[Idx++];
- FD->HasImplicitReturnZero = Record[Idx++];
- FD->IsConstexpr = Record[Idx++];
- FD->HasSkippedBody = Record[Idx++];
- FD->IsLateTemplateParsed = Record[Idx++];
- FD->setCachedLinkage(Linkage(Record[Idx++]));
- FD->EndRangeLoc = ReadSourceLocation(Record, Idx);
-
- switch ((FunctionDecl::TemplatedKind)Record[Idx++]) {
+ FD->SClass = (StorageClass)Record.readInt();
+ FD->IsInline = Record.readInt();
+ FD->IsInlineSpecified = Record.readInt();
+ FD->IsVirtualAsWritten = Record.readInt();
+ FD->IsPure = Record.readInt();
+ FD->HasInheritedPrototype = Record.readInt();
+ FD->HasWrittenPrototype = Record.readInt();
+ FD->IsDeleted = Record.readInt();
+ FD->IsTrivial = Record.readInt();
+ FD->IsDefaulted = Record.readInt();
+ FD->IsExplicitlyDefaulted = Record.readInt();
+ FD->HasImplicitReturnZero = Record.readInt();
+ FD->IsConstexpr = Record.readInt();
+ FD->HasSkippedBody = Record.readInt();
+ FD->IsLateTemplateParsed = Record.readInt();
+ FD->setCachedLinkage(Linkage(Record.readInt()));
+ FD->EndRangeLoc = ReadSourceLocation();
+
+ switch ((FunctionDecl::TemplatedKind)Record.readInt()) {
case FunctionDecl::TK_NonTemplate:
mergeRedeclarable(FD, Redecl);
break;
case FunctionDecl::TK_FunctionTemplate:
// Merged when we merge the template.
- FD->setDescribedFunctionTemplate(ReadDeclAs<FunctionTemplateDecl>(Record,
- Idx));
+ FD->setDescribedFunctionTemplate(ReadDeclAs<FunctionTemplateDecl>());
break;
case FunctionDecl::TK_MemberSpecialization: {
- FunctionDecl *InstFD = ReadDeclAs<FunctionDecl>(Record, Idx);
- TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = ReadSourceLocation(Record, Idx);
+ FunctionDecl *InstFD = ReadDeclAs<FunctionDecl>();
+ TemplateSpecializationKind TSK =
+ (TemplateSpecializationKind)Record.readInt();
+ SourceLocation POI = ReadSourceLocation();
FD->setInstantiationOfMemberFunction(Reader.getContext(), InstFD, TSK);
FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
mergeRedeclarable(FD, Redecl);
break;
}
case FunctionDecl::TK_FunctionTemplateSpecialization: {
- FunctionTemplateDecl *Template = ReadDeclAs<FunctionTemplateDecl>(Record,
- Idx);
- TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
-
+ FunctionTemplateDecl *Template = ReadDeclAs<FunctionTemplateDecl>();
+ TemplateSpecializationKind TSK =
+ (TemplateSpecializationKind)Record.readInt();
+
// Template arguments.
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
- /*Canonicalize*/ true);
+ Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true);
// Template args as written.
SmallVector<TemplateArgumentLoc, 8> TemplArgLocs;
SourceLocation LAngleLoc, RAngleLoc;
- bool HasTemplateArgumentsAsWritten = Record[Idx++];
+ bool HasTemplateArgumentsAsWritten = Record.readInt();
if (HasTemplateArgumentsAsWritten) {
- unsigned NumTemplateArgLocs = Record[Idx++];
+ unsigned NumTemplateArgLocs = Record.readInt();
TemplArgLocs.reserve(NumTemplateArgLocs);
for (unsigned i=0; i != NumTemplateArgLocs; ++i)
- TemplArgLocs.push_back(
- Reader.ReadTemplateArgumentLoc(F, Record, Idx));
-
- LAngleLoc = ReadSourceLocation(Record, Idx);
- RAngleLoc = ReadSourceLocation(Record, Idx);
+ TemplArgLocs.push_back(Record.readTemplateArgumentLoc());
+
+ LAngleLoc = ReadSourceLocation();
+ RAngleLoc = ReadSourceLocation();
}
-
- SourceLocation POI = ReadSourceLocation(Record, Idx);
+
+ SourceLocation POI = ReadSourceLocation();
ASTContext &C = Reader.getContext();
TemplateArgumentList *TemplArgList
@@ -841,8 +813,7 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
if (FD->isCanonicalDecl()) { // if canonical add to template's set.
// The template that contains the specializations set. It's not safe to
// use getCanonicalDecl on Template since it may still be initializing.
- FunctionTemplateDecl *CanonTemplate
- = ReadDeclAs<FunctionTemplateDecl>(Record, Idx);
+ FunctionTemplateDecl *CanonTemplate = ReadDeclAs<FunctionTemplateDecl>();
// Get the InsertPos by FindNodeOrInsertPos() instead of calling
// InsertNode(FTInfo) directly to avoid the getASTContext() call in
// FunctionTemplateSpecializationInfo's Profile().
@@ -867,18 +838,18 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
// Templates.
UnresolvedSet<8> TemplDecls;
- unsigned NumTemplates = Record[Idx++];
+ unsigned NumTemplates = Record.readInt();
while (NumTemplates--)
- TemplDecls.addDecl(ReadDeclAs<NamedDecl>(Record, Idx));
-
+ TemplDecls.addDecl(ReadDeclAs<NamedDecl>());
+
// Templates args.
TemplateArgumentListInfo TemplArgs;
- unsigned NumArgs = Record[Idx++];
+ unsigned NumArgs = Record.readInt();
while (NumArgs--)
- TemplArgs.addArgument(Reader.ReadTemplateArgumentLoc(F, Record, Idx));
- TemplArgs.setLAngleLoc(ReadSourceLocation(Record, Idx));
- TemplArgs.setRAngleLoc(ReadSourceLocation(Record, Idx));
-
+ TemplArgs.addArgument(Record.readTemplateArgumentLoc());
+ TemplArgs.setLAngleLoc(ReadSourceLocation());
+ TemplArgs.setRAngleLoc(ReadSourceLocation());
+
FD->setDependentTemplateSpecialization(Reader.getContext(),
TemplDecls, TemplArgs);
// These are not merged; we don't need to merge redeclarations of dependent
@@ -888,55 +859,55 @@ void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
}
// Read in the parameters.
- unsigned NumParams = Record[Idx++];
+ unsigned NumParams = Record.readInt();
SmallVector<ParmVarDecl *, 16> Params;
Params.reserve(NumParams);
for (unsigned I = 0; I != NumParams; ++I)
- Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ Params.push_back(ReadDeclAs<ParmVarDecl>());
FD->setParams(Reader.getContext(), Params);
}
void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
VisitNamedDecl(MD);
- if (Record[Idx++]) {
+ if (Record.readInt()) {
// Load the body on-demand. Most clients won't care, because method
// definitions rarely show up in headers.
Reader.PendingBodies[MD] = GetCurrentCursorOffset();
HasPendingBody = true;
- MD->setSelfDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
- MD->setCmdDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
- }
- MD->setInstanceMethod(Record[Idx++]);
- MD->setVariadic(Record[Idx++]);
- MD->setPropertyAccessor(Record[Idx++]);
- MD->setDefined(Record[Idx++]);
- MD->IsOverriding = Record[Idx++];
- MD->HasSkippedBody = Record[Idx++];
-
- MD->IsRedeclaration = Record[Idx++];
- MD->HasRedeclaration = Record[Idx++];
+ MD->setSelfDecl(ReadDeclAs<ImplicitParamDecl>());
+ MD->setCmdDecl(ReadDeclAs<ImplicitParamDecl>());
+ }
+ MD->setInstanceMethod(Record.readInt());
+ MD->setVariadic(Record.readInt());
+ MD->setPropertyAccessor(Record.readInt());
+ MD->setDefined(Record.readInt());
+ MD->IsOverriding = Record.readInt();
+ MD->HasSkippedBody = Record.readInt();
+
+ MD->IsRedeclaration = Record.readInt();
+ MD->HasRedeclaration = Record.readInt();
if (MD->HasRedeclaration)
Reader.getContext().setObjCMethodRedeclaration(MD,
- ReadDeclAs<ObjCMethodDecl>(Record, Idx));
-
- MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record[Idx++]);
- MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
- MD->SetRelatedResultType(Record[Idx++]);
- MD->setReturnType(Reader.readType(F, Record, Idx));
- MD->setReturnTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
- MD->DeclEndLoc = ReadSourceLocation(Record, Idx);
- unsigned NumParams = Record[Idx++];
+ ReadDeclAs<ObjCMethodDecl>());
+
+ MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record.readInt());
+ MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record.readInt());
+ MD->SetRelatedResultType(Record.readInt());
+ MD->setReturnType(Record.readType());
+ MD->setReturnTypeSourceInfo(GetTypeSourceInfo());
+ MD->DeclEndLoc = ReadSourceLocation();
+ unsigned NumParams = Record.readInt();
SmallVector<ParmVarDecl *, 16> Params;
Params.reserve(NumParams);
for (unsigned I = 0; I != NumParams; ++I)
- Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ Params.push_back(ReadDeclAs<ParmVarDecl>());
- MD->SelLocsKind = Record[Idx++];
- unsigned NumStoredSelLocs = Record[Idx++];
+ MD->SelLocsKind = Record.readInt();
+ unsigned NumStoredSelLocs = Record.readInt();
SmallVector<SourceLocation, 16> SelLocs;
SelLocs.reserve(NumStoredSelLocs);
for (unsigned i = 0; i != NumStoredSelLocs; ++i)
- SelLocs.push_back(ReadSourceLocation(Record, Idx));
+ SelLocs.push_back(ReadSourceLocation());
MD->setParamsAndSelLocs(Reader.getContext(), Params, SelLocs);
}
@@ -944,91 +915,106 @@ void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
void ASTDeclReader::VisitObjCTypeParamDecl(ObjCTypeParamDecl *D) {
VisitTypedefNameDecl(D);
- D->Variance = Record[Idx++];
- D->Index = Record[Idx++];
- D->VarianceLoc = ReadSourceLocation(Record, Idx);
- D->ColonLoc = ReadSourceLocation(Record, Idx);
+ D->Variance = Record.readInt();
+ D->Index = Record.readInt();
+ D->VarianceLoc = ReadSourceLocation();
+ D->ColonLoc = ReadSourceLocation();
}
void ASTDeclReader::VisitObjCContainerDecl(ObjCContainerDecl *CD) {
VisitNamedDecl(CD);
- CD->setAtStartLoc(ReadSourceLocation(Record, Idx));
- CD->setAtEndRange(ReadSourceRange(Record, Idx));
+ CD->setAtStartLoc(ReadSourceLocation());
+ CD->setAtEndRange(ReadSourceRange());
}
ObjCTypeParamList *ASTDeclReader::ReadObjCTypeParamList() {
- unsigned numParams = Record[Idx++];
+ unsigned numParams = Record.readInt();
if (numParams == 0)
return nullptr;
SmallVector<ObjCTypeParamDecl *, 4> typeParams;
typeParams.reserve(numParams);
for (unsigned i = 0; i != numParams; ++i) {
- auto typeParam = ReadDeclAs<ObjCTypeParamDecl>(Record, Idx);
+ auto typeParam = ReadDeclAs<ObjCTypeParamDecl>();
if (!typeParam)
return nullptr;
typeParams.push_back(typeParam);
}
- SourceLocation lAngleLoc = ReadSourceLocation(Record, Idx);
- SourceLocation rAngleLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation lAngleLoc = ReadSourceLocation();
+ SourceLocation rAngleLoc = ReadSourceLocation();
return ObjCTypeParamList::create(Reader.getContext(), lAngleLoc,
typeParams, rAngleLoc);
}
+void ASTDeclReader::ReadObjCDefinitionData(
+ struct ObjCInterfaceDecl::DefinitionData &Data) {
+ // Read the superclass.
+ Data.SuperClassTInfo = GetTypeSourceInfo();
+
+ Data.EndLoc = ReadSourceLocation();
+ Data.HasDesignatedInitializers = Record.readInt();
+
+ // Read the directly referenced protocols and their SourceLocations.
+ unsigned NumProtocols = Record.readInt();
+ SmallVector<ObjCProtocolDecl *, 16> Protocols;
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>());
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ ProtoLocs.push_back(ReadSourceLocation());
+ Data.ReferencedProtocols.set(Protocols.data(), NumProtocols, ProtoLocs.data(),
+ Reader.getContext());
+
+ // Read the transitive closure of protocols referenced by this class.
+ NumProtocols = Record.readInt();
+ Protocols.clear();
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>());
+ Data.AllReferencedProtocols.set(Protocols.data(), NumProtocols,
+ Reader.getContext());
+}
+
+void ASTDeclReader::MergeDefinitionData(ObjCInterfaceDecl *D,
+ struct ObjCInterfaceDecl::DefinitionData &&NewDD) {
+ // FIXME: odr checking?
+}
+
void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
RedeclarableResult Redecl = VisitRedeclarable(ID);
VisitObjCContainerDecl(ID);
- TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
+ TypeIDForTypeDecl = Record.getGlobalTypeID(Record.readInt());
mergeRedeclarable(ID, Redecl);
ID->TypeParamList = ReadObjCTypeParamList();
- if (Record[Idx++]) {
+ if (Record.readInt()) {
// Read the definition.
ID->allocateDefinitionData();
-
- // Set the definition data of the canonical declaration, so other
- // redeclarations will see it.
- ID->getCanonicalDecl()->Data = ID->Data;
-
- ObjCInterfaceDecl::DefinitionData &Data = ID->data();
-
- // Read the superclass.
- Data.SuperClassTInfo = GetTypeSourceInfo(Record, Idx);
- Data.EndLoc = ReadSourceLocation(Record, Idx);
- Data.HasDesignatedInitializers = Record[Idx++];
-
- // Read the directly referenced protocols and their SourceLocations.
- unsigned NumProtocols = Record[Idx++];
- SmallVector<ObjCProtocolDecl *, 16> Protocols;
- Protocols.reserve(NumProtocols);
- for (unsigned I = 0; I != NumProtocols; ++I)
- Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
- SmallVector<SourceLocation, 16> ProtoLocs;
- ProtoLocs.reserve(NumProtocols);
- for (unsigned I = 0; I != NumProtocols; ++I)
- ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
- ID->setProtocolList(Protocols.data(), NumProtocols, ProtoLocs.data(),
- Reader.getContext());
-
- // Read the transitive closure of protocols referenced by this class.
- NumProtocols = Record[Idx++];
- Protocols.clear();
- Protocols.reserve(NumProtocols);
- for (unsigned I = 0; I != NumProtocols; ++I)
- Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
- ID->data().AllReferencedProtocols.set(Protocols.data(), NumProtocols,
- Reader.getContext());
-
- // We will rebuild this list lazily.
- ID->setIvarList(nullptr);
+ ReadObjCDefinitionData(ID->data());
+ ObjCInterfaceDecl *Canon = ID->getCanonicalDecl();
+ if (Canon->Data.getPointer()) {
+ // If we already have a definition, keep the definition invariant and
+ // merge the data.
+ MergeDefinitionData(Canon, std::move(ID->data()));
+ ID->Data = Canon->Data;
+ } else {
+ // Set the definition data of the canonical declaration, so other
+ // redeclarations will see it.
+ ID->getCanonicalDecl()->Data = ID->Data;
+
+ // We will rebuild this list lazily.
+ ID->setIvarList(nullptr);
+ }
// Note that we have deserialized a definition.
Reader.PendingDefinitions.insert(ID);
-
+
// Note that we've loaded this Objective-C class.
Reader.ObjCClassesLoaded.push_back(ID);
} else {
@@ -1038,10 +1024,10 @@ void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
VisitFieldDecl(IVD);
- IVD->setAccessControl((ObjCIvarDecl::AccessControl)Record[Idx++]);
+ IVD->setAccessControl((ObjCIvarDecl::AccessControl)Record.readInt());
// This field will be built lazily.
IVD->setNextIvar(nullptr);
- bool synth = Record[Idx++];
+ bool synth = Record.readInt();
IVD->setSynthesize(synth);
}
@@ -1049,27 +1035,27 @@ void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
RedeclarableResult Redecl = VisitRedeclarable(PD);
VisitObjCContainerDecl(PD);
mergeRedeclarable(PD, Redecl);
-
- if (Record[Idx++]) {
+
+ if (Record.readInt()) {
// Read the definition.
PD->allocateDefinitionData();
-
+
// Set the definition data of the canonical declaration, so other
// redeclarations will see it.
PD->getCanonicalDecl()->Data = PD->Data;
- unsigned NumProtoRefs = Record[Idx++];
+ unsigned NumProtoRefs = Record.readInt();
SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
ProtoRefs.reserve(NumProtoRefs);
for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>());
SmallVector<SourceLocation, 16> ProtoLocs;
ProtoLocs.reserve(NumProtoRefs);
for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ ProtoLocs.push_back(ReadSourceLocation());
PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
Reader.getContext());
-
+
// Note that we have deserialized a definition.
Reader.PendingDefinitions.insert(PD);
} else {
@@ -1083,105 +1069,104 @@ void ASTDeclReader::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *FD) {
void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
VisitObjCContainerDecl(CD);
- CD->setCategoryNameLoc(ReadSourceLocation(Record, Idx));
- CD->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
- CD->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
-
+ CD->setCategoryNameLoc(ReadSourceLocation());
+ CD->setIvarLBraceLoc(ReadSourceLocation());
+ CD->setIvarRBraceLoc(ReadSourceLocation());
+
// Note that this category has been deserialized. We do this before
// deserializing the interface declaration, so that it will consider this
// category.
Reader.CategoriesDeserialized.insert(CD);
- CD->ClassInterface = ReadDeclAs<ObjCInterfaceDecl>(Record, Idx);
+ CD->ClassInterface = ReadDeclAs<ObjCInterfaceDecl>();
CD->TypeParamList = ReadObjCTypeParamList();
- unsigned NumProtoRefs = Record[Idx++];
+ unsigned NumProtoRefs = Record.readInt();
SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
ProtoRefs.reserve(NumProtoRefs);
for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>());
SmallVector<SourceLocation, 16> ProtoLocs;
ProtoLocs.reserve(NumProtoRefs);
for (unsigned I = 0; I != NumProtoRefs; ++I)
- ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ ProtoLocs.push_back(ReadSourceLocation());
CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
Reader.getContext());
}
void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
VisitNamedDecl(CAD);
- CAD->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ CAD->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>());
}
void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
VisitNamedDecl(D);
- D->setAtLoc(ReadSourceLocation(Record, Idx));
- D->setLParenLoc(ReadSourceLocation(Record, Idx));
- QualType T = Reader.readType(F, Record, Idx);
- TypeSourceInfo *TSI = GetTypeSourceInfo(Record, Idx);
+ D->setAtLoc(ReadSourceLocation());
+ D->setLParenLoc(ReadSourceLocation());
+ QualType T = Record.readType();
+ TypeSourceInfo *TSI = GetTypeSourceInfo();
D->setType(T, TSI);
D->setPropertyAttributes(
- (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt());
D->setPropertyAttributesAsWritten(
- (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ (ObjCPropertyDecl::PropertyAttributeKind)Record.readInt());
D->setPropertyImplementation(
- (ObjCPropertyDecl::PropertyControl)Record[Idx++]);
- D->setGetterName(Reader.ReadDeclarationName(F,Record, Idx).getObjCSelector());
- D->setSetterName(Reader.ReadDeclarationName(F,Record, Idx).getObjCSelector());
- D->setGetterMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
- D->setSetterMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
- D->setPropertyIvarDecl(ReadDeclAs<ObjCIvarDecl>(Record, Idx));
+ (ObjCPropertyDecl::PropertyControl)Record.readInt());
+ D->setGetterName(Record.readDeclarationName().getObjCSelector());
+ D->setSetterName(Record.readDeclarationName().getObjCSelector());
+ D->setGetterMethodDecl(ReadDeclAs<ObjCMethodDecl>());
+ D->setSetterMethodDecl(ReadDeclAs<ObjCMethodDecl>());
+ D->setPropertyIvarDecl(ReadDeclAs<ObjCIvarDecl>());
}
void ASTDeclReader::VisitObjCImplDecl(ObjCImplDecl *D) {
VisitObjCContainerDecl(D);
- D->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ D->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>());
}
void ASTDeclReader::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
VisitObjCImplDecl(D);
- D->setIdentifier(Reader.GetIdentifierInfo(F, Record, Idx));
- D->CategoryNameLoc = ReadSourceLocation(Record, Idx);
+ D->setIdentifier(Record.getIdentifierInfo());
+ D->CategoryNameLoc = ReadSourceLocation();
}
void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
VisitObjCImplDecl(D);
- D->setSuperClass(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
- D->SuperLoc = ReadSourceLocation(Record, Idx);
- D->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
- D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
- D->setHasNonZeroConstructors(Record[Idx++]);
- D->setHasDestructors(Record[Idx++]);
- D->NumIvarInitializers = Record[Idx++];
+ D->setSuperClass(ReadDeclAs<ObjCInterfaceDecl>());
+ D->SuperLoc = ReadSourceLocation();
+ D->setIvarLBraceLoc(ReadSourceLocation());
+ D->setIvarRBraceLoc(ReadSourceLocation());
+ D->setHasNonZeroConstructors(Record.readInt());
+ D->setHasDestructors(Record.readInt());
+ D->NumIvarInitializers = Record.readInt();
if (D->NumIvarInitializers)
- D->IvarInitializers = ReadGlobalOffset(F, Record, Idx);
+ D->IvarInitializers = ReadGlobalOffset();
}
void ASTDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
VisitDecl(D);
- D->setAtLoc(ReadSourceLocation(Record, Idx));
- D->setPropertyDecl(ReadDeclAs<ObjCPropertyDecl>(Record, Idx));
- D->PropertyIvarDecl = ReadDeclAs<ObjCIvarDecl>(Record, Idx);
- D->IvarLoc = ReadSourceLocation(Record, Idx);
- D->setGetterCXXConstructor(Reader.ReadExpr(F));
- D->setSetterCXXAssignment(Reader.ReadExpr(F));
+ D->setAtLoc(ReadSourceLocation());
+ D->setPropertyDecl(ReadDeclAs<ObjCPropertyDecl>());
+ D->PropertyIvarDecl = ReadDeclAs<ObjCIvarDecl>();
+ D->IvarLoc = ReadSourceLocation();
+ D->setGetterCXXConstructor(Record.readExpr());
+ D->setSetterCXXAssignment(Record.readExpr());
}
void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
VisitDeclaratorDecl(FD);
- FD->Mutable = Record[Idx++];
- if (int BitWidthOrInitializer = Record[Idx++]) {
+ FD->Mutable = Record.readInt();
+ if (int BitWidthOrInitializer = Record.readInt()) {
FD->InitStorage.setInt(
static_cast<FieldDecl::InitStorageKind>(BitWidthOrInitializer - 1));
if (FD->InitStorage.getInt() == FieldDecl::ISK_CapturedVLAType) {
// Read captured variable length array.
- FD->InitStorage.setPointer(
- Reader.readType(F, Record, Idx).getAsOpaquePtr());
+ FD->InitStorage.setPointer(Record.readType().getAsOpaquePtr());
} else {
- FD->InitStorage.setPointer(Reader.ReadExpr(F));
+ FD->InitStorage.setPointer(Record.readExpr());
}
}
if (!FD->getDeclName()) {
- if (FieldDecl *Tmpl = ReadDeclAs<FieldDecl>(Record, Idx))
+ if (FieldDecl *Tmpl = ReadDeclAs<FieldDecl>())
Reader.getContext().setInstantiatedFromUnnamedFieldDecl(FD, Tmpl);
}
mergeMergeable(FD);
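The integer decoded at the top of VisitFieldDecl packs an optional tagged payload into one value: zero means no bit-width or initializer was stored, and any non-zero value is the FieldDecl::InitStorageKind plus one, which then selects whether a type (captured VLA) or an expression follows. A minimal standalone illustration of the trick, with hypothetical names:

    // 0 = absent; k+1 = tag k, with a payload following (hypothetical names).
    enum PayloadKind { PK_A, PK_B, PK_C };
    unsigned encode(bool Present, PayloadKind K) {
      return Present ? unsigned(K) + 1 : 0;
    }
    // Decode, mirroring the code above:
    //   if (unsigned V = Record.readInt()) {
    //     PayloadKind K = PayloadKind(V - 1);
    //     ... read the payload selected by K ...
    //   }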
@@ -1189,19 +1174,19 @@ void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
void ASTDeclReader::VisitMSPropertyDecl(MSPropertyDecl *PD) {
VisitDeclaratorDecl(PD);
- PD->GetterId = Reader.GetIdentifierInfo(F, Record, Idx);
- PD->SetterId = Reader.GetIdentifierInfo(F, Record, Idx);
+ PD->GetterId = Record.getIdentifierInfo();
+ PD->SetterId = Record.getIdentifierInfo();
}
void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
VisitValueDecl(FD);
- FD->ChainingSize = Record[Idx++];
+ FD->ChainingSize = Record.readInt();
assert(FD->ChainingSize >= 2 && "Anonymous chaining must be >= 2");
FD->Chaining = new (Reader.getContext()) NamedDecl*[FD->ChainingSize];
for (unsigned I = 0; I != FD->ChainingSize; ++I)
- FD->Chaining[I] = ReadDeclAs<NamedDecl>(Record, Idx);
+ FD->Chaining[I] = ReadDeclAs<NamedDecl>();
mergeMergeable(FD);
}
@@ -1210,21 +1195,23 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
RedeclarableResult Redecl = VisitRedeclarable(VD);
VisitDeclaratorDecl(VD);
- VD->VarDeclBits.SClass = (StorageClass)Record[Idx++];
- VD->VarDeclBits.TSCSpec = Record[Idx++];
- VD->VarDeclBits.InitStyle = Record[Idx++];
+ VD->VarDeclBits.SClass = (StorageClass)Record.readInt();
+ VD->VarDeclBits.TSCSpec = Record.readInt();
+ VD->VarDeclBits.InitStyle = Record.readInt();
if (!isa<ParmVarDecl>(VD)) {
- VD->NonParmVarDeclBits.ExceptionVar = Record[Idx++];
- VD->NonParmVarDeclBits.NRVOVariable = Record[Idx++];
- VD->NonParmVarDeclBits.CXXForRangeDecl = Record[Idx++];
- VD->NonParmVarDeclBits.ARCPseudoStrong = Record[Idx++];
- VD->NonParmVarDeclBits.IsInline = Record[Idx++];
- VD->NonParmVarDeclBits.IsInlineSpecified = Record[Idx++];
- VD->NonParmVarDeclBits.IsConstexpr = Record[Idx++];
- VD->NonParmVarDeclBits.IsInitCapture = Record[Idx++];
- VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record[Idx++];
- }
- Linkage VarLinkage = Linkage(Record[Idx++]);
+ VD->NonParmVarDeclBits.IsThisDeclarationADemotedDefinition =
+ Record.readInt();
+ VD->NonParmVarDeclBits.ExceptionVar = Record.readInt();
+ VD->NonParmVarDeclBits.NRVOVariable = Record.readInt();
+ VD->NonParmVarDeclBits.CXXForRangeDecl = Record.readInt();
+ VD->NonParmVarDeclBits.ARCPseudoStrong = Record.readInt();
+ VD->NonParmVarDeclBits.IsInline = Record.readInt();
+ VD->NonParmVarDeclBits.IsInlineSpecified = Record.readInt();
+ VD->NonParmVarDeclBits.IsConstexpr = Record.readInt();
+ VD->NonParmVarDeclBits.IsInitCapture = Record.readInt();
+ VD->NonParmVarDeclBits.PreviousDeclInSameBlockScope = Record.readInt();
+ }
+ Linkage VarLinkage = Linkage(Record.readInt());
VD->setCachedLinkage(VarLinkage);
// Reconstruct the one piece of the IdentifierNamespace that we need.
@@ -1232,9 +1219,9 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
VD->getLexicalDeclContext()->isFunctionOrMethod())
VD->setLocalExternDecl();
- if (uint64_t Val = Record[Idx++]) {
- VD->setInit(Reader.ReadExpr(F));
- if (Val > 1) {
+ if (uint64_t Val = Record.readInt()) {
+ VD->setInit(Record.readExpr());
+ if (Val > 1) { // Val encodes: 1 = ICE not yet known, 2 = not an ICE, 3 = an ICE
EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
Eval->CheckedICE = true;
Eval->IsICE = Val == 3;
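For reference, the initializer marker Val is a small status code rather than a boolean, which is why the test is Val > 1 rather than a simple truthiness check. A summary of what the reader derives from it (restating the logic above, no new behavior):

    // Val == 0 : no initializer stored
    // Val == 1 : initializer present, ICE status not yet computed
    // Val == 2 : initializer present, known not to be an ICE
    // Val == 3 : initializer present, known to be an ICE
    bool CheckedICE = Val > 1;  // the ICE check was already done
    bool IsICE = Val == 3;      // ...and this was its result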
@@ -1244,7 +1231,7 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
enum VarKind {
VarNotTemplate = 0, VarTemplate, StaticDataMemberSpecialization
};
- switch ((VarKind)Record[Idx++]) {
+ switch ((VarKind)Record.readInt()) {
case VarNotTemplate:
// Only true variables (not parameters or implicit parameters) can be
// merged; the other kinds are not really redeclarable at all.
@@ -1254,12 +1241,13 @@ ASTDeclReader::RedeclarableResult ASTDeclReader::VisitVarDeclImpl(VarDecl *VD) {
break;
case VarTemplate:
// Merged when we merge the template.
- VD->setDescribedVarTemplate(ReadDeclAs<VarTemplateDecl>(Record, Idx));
+ VD->setDescribedVarTemplate(ReadDeclAs<VarTemplateDecl>());
break;
case StaticDataMemberSpecialization: { // HasMemberSpecializationInfo.
- VarDecl *Tmpl = ReadDeclAs<VarDecl>(Record, Idx);
- TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = ReadSourceLocation(Record, Idx);
+ VarDecl *Tmpl = ReadDeclAs<VarDecl>();
+ TemplateSpecializationKind TSK =
+ (TemplateSpecializationKind)Record.readInt();
+ SourceLocation POI = ReadSourceLocation();
Reader.getContext().setInstantiatedFromStaticDataMember(VD, Tmpl, TSK, POI);
mergeRedeclarable(VD, Redecl);
break;
@@ -1275,10 +1263,10 @@ void ASTDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) {
void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
VisitVarDecl(PD);
- unsigned isObjCMethodParam = Record[Idx++];
- unsigned scopeDepth = Record[Idx++];
- unsigned scopeIndex = Record[Idx++];
- unsigned declQualifier = Record[Idx++];
+ unsigned isObjCMethodParam = Record.readInt();
+ unsigned scopeDepth = Record.readInt();
+ unsigned scopeIndex = Record.readInt();
+ unsigned declQualifier = Record.readInt();
if (isObjCMethodParam) {
assert(scopeDepth == 0);
PD->setObjCMethodScopeInfo(scopeIndex);
@@ -1286,46 +1274,58 @@ void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
} else {
PD->setScopeInfo(scopeDepth, scopeIndex);
}
- PD->ParmVarDeclBits.IsKNRPromoted = Record[Idx++];
- PD->ParmVarDeclBits.HasInheritedDefaultArg = Record[Idx++];
- if (Record[Idx++]) // hasUninstantiatedDefaultArg.
- PD->setUninstantiatedDefaultArg(Reader.ReadExpr(F));
+ PD->ParmVarDeclBits.IsKNRPromoted = Record.readInt();
+ PD->ParmVarDeclBits.HasInheritedDefaultArg = Record.readInt();
+ if (Record.readInt()) // hasUninstantiatedDefaultArg.
+ PD->setUninstantiatedDefaultArg(Record.readExpr());
// FIXME: If this is a redeclaration of a function from another module, handle
// inheritance of default arguments.
}
+void ASTDeclReader::VisitDecompositionDecl(DecompositionDecl *DD) {
+ VisitVarDecl(DD);
+ BindingDecl **BDs = DD->getTrailingObjects<BindingDecl*>();
+ for (unsigned I = 0; I != DD->NumBindings; ++I)
+ BDs[I] = ReadDeclAs<BindingDecl>();
+}
+
+void ASTDeclReader::VisitBindingDecl(BindingDecl *BD) {
+ VisitValueDecl(BD);
+ BD->Binding = Record.readExpr();
+}
+
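DecompositionDecl keeps its BindingDecl pointers as trailing objects, so the allocation must know the binding count before any field is visited. That is why the DECL_DECOMPOSITION case in ReadDeclRecord (further below) reads the count itself and passes it to CreateDeserialized; VisitDecompositionDecl then only fills the preallocated tail. The two-phase pattern, schematically:

    // Phase 1, in ReadDeclRecord: size the trailing storage up front.
    unsigned NumBindings = Record.readInt();
    DecompositionDecl *DD =
        DecompositionDecl::CreateDeserialized(Context, ID, NumBindings);
    // Phase 2, in the visitor: populate the trailing array in place.
    BindingDecl **BDs = DD->getTrailingObjects<BindingDecl *>();
    for (unsigned I = 0; I != NumBindings; ++I)
      BDs[I] = ReadDeclAs<BindingDecl>();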
void ASTDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) {
VisitDecl(AD);
- AD->setAsmString(cast<StringLiteral>(Reader.ReadExpr(F)));
- AD->setRParenLoc(ReadSourceLocation(Record, Idx));
+ AD->setAsmString(cast<StringLiteral>(Record.readExpr()));
+ AD->setRParenLoc(ReadSourceLocation());
}
void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
VisitDecl(BD);
- BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadStmt(F)));
- BD->setSignatureAsWritten(GetTypeSourceInfo(Record, Idx));
- unsigned NumParams = Record[Idx++];
+ BD->setBody(cast_or_null<CompoundStmt>(Record.readStmt()));
+ BD->setSignatureAsWritten(GetTypeSourceInfo());
+ unsigned NumParams = Record.readInt();
SmallVector<ParmVarDecl *, 16> Params;
Params.reserve(NumParams);
for (unsigned I = 0; I != NumParams; ++I)
- Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ Params.push_back(ReadDeclAs<ParmVarDecl>());
BD->setParams(Params);
- BD->setIsVariadic(Record[Idx++]);
- BD->setBlockMissingReturnType(Record[Idx++]);
- BD->setIsConversionFromLambda(Record[Idx++]);
+ BD->setIsVariadic(Record.readInt());
+ BD->setBlockMissingReturnType(Record.readInt());
+ BD->setIsConversionFromLambda(Record.readInt());
- bool capturesCXXThis = Record[Idx++];
- unsigned numCaptures = Record[Idx++];
+ bool capturesCXXThis = Record.readInt();
+ unsigned numCaptures = Record.readInt();
SmallVector<BlockDecl::Capture, 16> captures;
captures.reserve(numCaptures);
for (unsigned i = 0; i != numCaptures; ++i) {
- VarDecl *decl = ReadDeclAs<VarDecl>(Record, Idx);
- unsigned flags = Record[Idx++];
+ VarDecl *decl = ReadDeclAs<VarDecl>();
+ unsigned flags = Record.readInt();
bool byRef = (flags & 1);
bool nested = (flags & 2);
- Expr *copyExpr = ((flags & 4) ? Reader.ReadExpr(F) : nullptr);
+ Expr *copyExpr = ((flags & 4) ? Record.readExpr() : nullptr);
captures.push_back(BlockDecl::Capture(decl, byRef, nested, copyExpr));
}
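Each block capture above is one decl reference plus a flags word, and bit 2 also changes what the stream contains next, so the layout is worth spelling out. A compact reference with illustrative constant names:

    // Block-capture flags word, as decoded above (names are illustrative):
    enum : unsigned {
      CaptureByRef   = 1u << 0,  // flags & 1
      CaptureNested  = 1u << 1,  // flags & 2
      CaptureHasCopy = 1u << 2,  // flags & 4: a copy Expr follows in the record
    };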
@@ -1334,35 +1334,40 @@ void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
void ASTDeclReader::VisitCapturedDecl(CapturedDecl *CD) {
VisitDecl(CD);
- unsigned ContextParamPos = Record[Idx++];
- CD->setNothrow(Record[Idx++] != 0);
+ unsigned ContextParamPos = Record.readInt();
+ CD->setNothrow(Record.readInt() != 0);
// Body is set by VisitCapturedStmt.
for (unsigned I = 0; I < CD->NumParams; ++I) {
if (I != ContextParamPos)
- CD->setParam(I, ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ CD->setParam(I, ReadDeclAs<ImplicitParamDecl>());
else
- CD->setContextParam(I, ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ CD->setContextParam(I, ReadDeclAs<ImplicitParamDecl>());
}
}
void ASTDeclReader::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
VisitDecl(D);
- D->setLanguage((LinkageSpecDecl::LanguageIDs)Record[Idx++]);
- D->setExternLoc(ReadSourceLocation(Record, Idx));
- D->setRBraceLoc(ReadSourceLocation(Record, Idx));
+ D->setLanguage((LinkageSpecDecl::LanguageIDs)Record.readInt());
+ D->setExternLoc(ReadSourceLocation());
+ D->setRBraceLoc(ReadSourceLocation());
+}
+
+void ASTDeclReader::VisitExportDecl(ExportDecl *D) {
+ VisitDecl(D);
+ D->RBraceLoc = ReadSourceLocation();
}
void ASTDeclReader::VisitLabelDecl(LabelDecl *D) {
VisitNamedDecl(D);
- D->setLocStart(ReadSourceLocation(Record, Idx));
+ D->setLocStart(ReadSourceLocation());
}
void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
- D->setInline(Record[Idx++]);
- D->LocStart = ReadSourceLocation(Record, Idx);
- D->RBraceLoc = ReadSourceLocation(Record, Idx);
+ D->setInline(Record.readInt());
+ D->LocStart = ReadSourceLocation();
+ D->RBraceLoc = ReadSourceLocation();
// Defer loading the anonymous namespace until we've finished merging
// this namespace; loading it might load a later declaration of the
@@ -1370,7 +1375,7 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
// get merged before newer ones try to merge.
GlobalDeclID AnonNamespace = 0;
if (Redecl.getFirstID() == ThisDeclID) {
- AnonNamespace = ReadDeclID(Record, Idx);
+ AnonNamespace = ReadDeclID();
} else {
// Link this namespace back to the first declaration, which has already
// been deserialized.
@@ -1384,7 +1389,7 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
// any other module's anonymous namespaces, so don't attach the anonymous
// namespace at all.
NamespaceDecl *Anon = cast<NamespaceDecl>(Reader.GetDecl(AnonNamespace));
- if (F.Kind != MK_ImplicitModule && F.Kind != MK_ExplicitModule)
+ if (!Record.isModule())
D->setAnonymousNamespace(Anon);
}
}
@@ -1392,31 +1397,40 @@ void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
- D->NamespaceLoc = ReadSourceLocation(Record, Idx);
- D->IdentLoc = ReadSourceLocation(Record, Idx);
- D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- D->Namespace = ReadDeclAs<NamedDecl>(Record, Idx);
+ D->NamespaceLoc = ReadSourceLocation();
+ D->IdentLoc = ReadSourceLocation();
+ D->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ D->Namespace = ReadDeclAs<NamedDecl>();
mergeRedeclarable(D, Redecl);
}
void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
VisitNamedDecl(D);
- D->setUsingLoc(ReadSourceLocation(Record, Idx));
- D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
- D->FirstUsingShadow.setPointer(ReadDeclAs<UsingShadowDecl>(Record, Idx));
- D->setTypename(Record[Idx++]);
- if (NamedDecl *Pattern = ReadDeclAs<NamedDecl>(Record, Idx))
+ D->setUsingLoc(ReadSourceLocation());
+ D->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName());
+ D->FirstUsingShadow.setPointer(ReadDeclAs<UsingShadowDecl>());
+ D->setTypename(Record.readInt());
+ if (NamedDecl *Pattern = ReadDeclAs<NamedDecl>())
Reader.getContext().setInstantiatedFromUsingDecl(D, Pattern);
mergeMergeable(D);
}
+void ASTDeclReader::VisitUsingPackDecl(UsingPackDecl *D) {
+ VisitNamedDecl(D);
+ D->InstantiatedFrom = ReadDeclAs<NamedDecl>();
+ NamedDecl **Expansions = D->getTrailingObjects<NamedDecl*>();
+ for (unsigned I = 0; I != D->NumExpansions; ++I)
+ Expansions[I] = ReadDeclAs<NamedDecl>();
+ mergeMergeable(D);
+}
+
void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
VisitNamedDecl(D);
- D->setTargetDecl(ReadDeclAs<NamedDecl>(Record, Idx));
- D->UsingOrNextShadow = ReadDeclAs<NamedDecl>(Record, Idx);
- UsingShadowDecl *Pattern = ReadDeclAs<UsingShadowDecl>(Record, Idx);
+ D->setTargetDecl(ReadDeclAs<NamedDecl>());
+ D->UsingOrNextShadow = ReadDeclAs<NamedDecl>();
+ UsingShadowDecl *Pattern = ReadDeclAs<UsingShadowDecl>();
if (Pattern)
Reader.getContext().setInstantiatedFromUsingShadowDecl(D, Pattern);
mergeRedeclarable(D, Redecl);
@@ -1425,115 +1439,114 @@ void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
void ASTDeclReader::VisitConstructorUsingShadowDecl(
ConstructorUsingShadowDecl *D) {
VisitUsingShadowDecl(D);
- D->NominatedBaseClassShadowDecl =
- ReadDeclAs<ConstructorUsingShadowDecl>(Record, Idx);
- D->ConstructedBaseClassShadowDecl =
- ReadDeclAs<ConstructorUsingShadowDecl>(Record, Idx);
- D->IsVirtual = Record[Idx++];
+ D->NominatedBaseClassShadowDecl = ReadDeclAs<ConstructorUsingShadowDecl>();
+ D->ConstructedBaseClassShadowDecl = ReadDeclAs<ConstructorUsingShadowDecl>();
+ D->IsVirtual = Record.readInt();
}
void ASTDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
VisitNamedDecl(D);
- D->UsingLoc = ReadSourceLocation(Record, Idx);
- D->NamespaceLoc = ReadSourceLocation(Record, Idx);
- D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- D->NominatedNamespace = ReadDeclAs<NamedDecl>(Record, Idx);
- D->CommonAncestor = ReadDeclAs<DeclContext>(Record, Idx);
+ D->UsingLoc = ReadSourceLocation();
+ D->NamespaceLoc = ReadSourceLocation();
+ D->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ D->NominatedNamespace = ReadDeclAs<NamedDecl>();
+ D->CommonAncestor = ReadDeclAs<DeclContext>();
}
void ASTDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
VisitValueDecl(D);
- D->setUsingLoc(ReadSourceLocation(Record, Idx));
- D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
+ D->setUsingLoc(ReadSourceLocation());
+ D->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName());
+ D->EllipsisLoc = ReadSourceLocation();
mergeMergeable(D);
}
void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
UnresolvedUsingTypenameDecl *D) {
VisitTypeDecl(D);
- D->TypenameLocation = ReadSourceLocation(Record, Idx);
- D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ D->TypenameLocation = ReadSourceLocation();
+ D->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ D->EllipsisLoc = ReadSourceLocation();
mergeMergeable(D);
}
void ASTDeclReader::ReadCXXDefinitionData(
- struct CXXRecordDecl::DefinitionData &Data,
- const RecordData &Record, unsigned &Idx) {
+ struct CXXRecordDecl::DefinitionData &Data) {
// Note: the caller has deserialized the IsLambda bit already.
- Data.UserDeclaredConstructor = Record[Idx++];
- Data.UserDeclaredSpecialMembers = Record[Idx++];
- Data.Aggregate = Record[Idx++];
- Data.PlainOldData = Record[Idx++];
- Data.Empty = Record[Idx++];
- Data.Polymorphic = Record[Idx++];
- Data.Abstract = Record[Idx++];
- Data.IsStandardLayout = Record[Idx++];
- Data.HasNoNonEmptyBases = Record[Idx++];
- Data.HasPrivateFields = Record[Idx++];
- Data.HasProtectedFields = Record[Idx++];
- Data.HasPublicFields = Record[Idx++];
- Data.HasMutableFields = Record[Idx++];
- Data.HasVariantMembers = Record[Idx++];
- Data.HasOnlyCMembers = Record[Idx++];
- Data.HasInClassInitializer = Record[Idx++];
- Data.HasUninitializedReferenceMember = Record[Idx++];
- Data.HasUninitializedFields = Record[Idx++];
- Data.HasInheritedConstructor = Record[Idx++];
- Data.HasInheritedAssignment = Record[Idx++];
- Data.NeedOverloadResolutionForMoveConstructor = Record[Idx++];
- Data.NeedOverloadResolutionForMoveAssignment = Record[Idx++];
- Data.NeedOverloadResolutionForDestructor = Record[Idx++];
- Data.DefaultedMoveConstructorIsDeleted = Record[Idx++];
- Data.DefaultedMoveAssignmentIsDeleted = Record[Idx++];
- Data.DefaultedDestructorIsDeleted = Record[Idx++];
- Data.HasTrivialSpecialMembers = Record[Idx++];
- Data.DeclaredNonTrivialSpecialMembers = Record[Idx++];
- Data.HasIrrelevantDestructor = Record[Idx++];
- Data.HasConstexprNonCopyMoveConstructor = Record[Idx++];
- Data.HasDefaultedDefaultConstructor = Record[Idx++];
- Data.DefaultedDefaultConstructorIsConstexpr = Record[Idx++];
- Data.HasConstexprDefaultConstructor = Record[Idx++];
- Data.HasNonLiteralTypeFieldsOrBases = Record[Idx++];
- Data.ComputedVisibleConversions = Record[Idx++];
- Data.UserProvidedDefaultConstructor = Record[Idx++];
- Data.DeclaredSpecialMembers = Record[Idx++];
- Data.ImplicitCopyConstructorHasConstParam = Record[Idx++];
- Data.ImplicitCopyAssignmentHasConstParam = Record[Idx++];
- Data.HasDeclaredCopyConstructorWithConstParam = Record[Idx++];
- Data.HasDeclaredCopyAssignmentWithConstParam = Record[Idx++];
-
- Data.NumBases = Record[Idx++];
+ Data.UserDeclaredConstructor = Record.readInt();
+ Data.UserDeclaredSpecialMembers = Record.readInt();
+ Data.Aggregate = Record.readInt();
+ Data.PlainOldData = Record.readInt();
+ Data.Empty = Record.readInt();
+ Data.Polymorphic = Record.readInt();
+ Data.Abstract = Record.readInt();
+ Data.IsStandardLayout = Record.readInt();
+ Data.HasNoNonEmptyBases = Record.readInt();
+ Data.HasPrivateFields = Record.readInt();
+ Data.HasProtectedFields = Record.readInt();
+ Data.HasPublicFields = Record.readInt();
+ Data.HasMutableFields = Record.readInt();
+ Data.HasVariantMembers = Record.readInt();
+ Data.HasOnlyCMembers = Record.readInt();
+ Data.HasInClassInitializer = Record.readInt();
+ Data.HasUninitializedReferenceMember = Record.readInt();
+ Data.HasUninitializedFields = Record.readInt();
+ Data.HasInheritedConstructor = Record.readInt();
+ Data.HasInheritedAssignment = Record.readInt();
+ Data.NeedOverloadResolutionForMoveConstructor = Record.readInt();
+ Data.NeedOverloadResolutionForMoveAssignment = Record.readInt();
+ Data.NeedOverloadResolutionForDestructor = Record.readInt();
+ Data.DefaultedMoveConstructorIsDeleted = Record.readInt();
+ Data.DefaultedMoveAssignmentIsDeleted = Record.readInt();
+ Data.DefaultedDestructorIsDeleted = Record.readInt();
+ Data.HasTrivialSpecialMembers = Record.readInt();
+ Data.DeclaredNonTrivialSpecialMembers = Record.readInt();
+ Data.HasIrrelevantDestructor = Record.readInt();
+ Data.HasConstexprNonCopyMoveConstructor = Record.readInt();
+ Data.HasDefaultedDefaultConstructor = Record.readInt();
+ Data.DefaultedDefaultConstructorIsConstexpr = Record.readInt();
+ Data.HasConstexprDefaultConstructor = Record.readInt();
+ Data.HasNonLiteralTypeFieldsOrBases = Record.readInt();
+ Data.ComputedVisibleConversions = Record.readInt();
+ Data.UserProvidedDefaultConstructor = Record.readInt();
+ Data.DeclaredSpecialMembers = Record.readInt();
+ Data.ImplicitCopyConstructorHasConstParam = Record.readInt();
+ Data.ImplicitCopyAssignmentHasConstParam = Record.readInt();
+ Data.HasDeclaredCopyConstructorWithConstParam = Record.readInt();
+ Data.HasDeclaredCopyAssignmentWithConstParam = Record.readInt();
+
+ Data.NumBases = Record.readInt();
if (Data.NumBases)
- Data.Bases = ReadGlobalOffset(F, Record, Idx);
- Data.NumVBases = Record[Idx++];
+ Data.Bases = ReadGlobalOffset();
+ Data.NumVBases = Record.readInt();
if (Data.NumVBases)
- Data.VBases = ReadGlobalOffset(F, Record, Idx);
-
- Reader.ReadUnresolvedSet(F, Data.Conversions, Record, Idx);
- Reader.ReadUnresolvedSet(F, Data.VisibleConversions, Record, Idx);
+ Data.VBases = ReadGlobalOffset();
+
+ Record.readUnresolvedSet(Data.Conversions);
+ Record.readUnresolvedSet(Data.VisibleConversions);
assert(Data.Definition && "Data.Definition should be already set!");
- Data.FirstFriend = ReadDeclID(Record, Idx);
+ Data.FirstFriend = ReadDeclID();
if (Data.IsLambda) {
typedef LambdaCapture Capture;
CXXRecordDecl::LambdaDefinitionData &Lambda
= static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
- Lambda.Dependent = Record[Idx++];
- Lambda.IsGenericLambda = Record[Idx++];
- Lambda.CaptureDefault = Record[Idx++];
- Lambda.NumCaptures = Record[Idx++];
- Lambda.NumExplicitCaptures = Record[Idx++];
- Lambda.ManglingNumber = Record[Idx++];
- Lambda.ContextDecl = ReadDecl(Record, Idx);
+ Lambda.Dependent = Record.readInt();
+ Lambda.IsGenericLambda = Record.readInt();
+ Lambda.CaptureDefault = Record.readInt();
+ Lambda.NumCaptures = Record.readInt();
+ Lambda.NumExplicitCaptures = Record.readInt();
+ Lambda.ManglingNumber = Record.readInt();
+ Lambda.ContextDecl = ReadDeclID();
Lambda.Captures
= (Capture*)Reader.Context.Allocate(sizeof(Capture)*Lambda.NumCaptures);
Capture *ToCapture = Lambda.Captures;
- Lambda.MethodTyInfo = GetTypeSourceInfo(Record, Idx);
+ Lambda.MethodTyInfo = GetTypeSourceInfo();
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
- SourceLocation Loc = ReadSourceLocation(Record, Idx);
- bool IsImplicit = Record[Idx++];
- LambdaCaptureKind Kind = static_cast<LambdaCaptureKind>(Record[Idx++]);
+ SourceLocation Loc = ReadSourceLocation();
+ bool IsImplicit = Record.readInt();
+ LambdaCaptureKind Kind = static_cast<LambdaCaptureKind>(Record.readInt());
switch (Kind) {
case LCK_StarThis:
case LCK_This:
@@ -1542,8 +1555,8 @@ void ASTDeclReader::ReadCXXDefinitionData(
break;
case LCK_ByCopy:
case LCK_ByRef:
- VarDecl *Var = ReadDeclAs<VarDecl>(Record, Idx);
- SourceLocation EllipsisLoc = ReadSourceLocation(Record, Idx);
+ VarDecl *Var = ReadDeclAs<VarDecl>();
+ SourceLocation EllipsisLoc = ReadSourceLocation();
*ToCapture++ = Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc);
break;
}
@@ -1563,7 +1576,7 @@ void ASTDeclReader::MergeDefinitionData(
DD.Definition));
Reader.PendingDefinitions.erase(MergeDD.Definition);
MergeDD.Definition->IsCompleteDefinition = false;
- mergeDefinitionVisibility(DD.Definition, MergeDD.Definition);
+ Reader.mergeDefinitionVisibility(DD.Definition, MergeDD.Definition);
assert(Reader.Lookups.find(MergeDD.Definition) == Reader.Lookups.end() &&
"already loaded pending lookups for merged definition");
}
@@ -1665,14 +1678,14 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
// Determine whether this is a lambda closure type, so that we can
// allocate the appropriate DefinitionData structure.
- bool IsLambda = Record[Idx++];
+ bool IsLambda = Record.readInt();
if (IsLambda)
DD = new (C) CXXRecordDecl::LambdaDefinitionData(D, nullptr, false, false,
LCD_None);
else
DD = new (C) struct CXXRecordDecl::DefinitionData(D);
- ReadCXXDefinitionData(*DD, Record, Idx);
+ ReadCXXDefinitionData(*DD);
// We might already have a definition for this record. This can happen either
// because we're reading an update record, or because we've already done some
@@ -1706,7 +1719,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
enum CXXRecKind {
CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
};
- switch ((CXXRecKind)Record[Idx++]) {
+ switch ((CXXRecKind)Record.readInt()) {
case CXXRecNotTemplate:
// Merged when we merge the folding set entry in the primary template.
if (!isa<ClassTemplateSpecializationDecl>(D))
@@ -1714,7 +1727,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
break;
case CXXRecTemplate: {
// Merged when we merge the template.
- ClassTemplateDecl *Template = ReadDeclAs<ClassTemplateDecl>(Record, Idx);
+ ClassTemplateDecl *Template = ReadDeclAs<ClassTemplateDecl>();
D->TemplateOrInstantiation = Template;
if (!Template->getTemplatedDecl()) {
// We've not actually loaded the ClassTemplateDecl yet, because we're
@@ -1728,9 +1741,10 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
break;
}
case CXXRecMemberSpecialization: {
- CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>(Record, Idx);
- TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = ReadSourceLocation(Record, Idx);
+ CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>();
+ TemplateSpecializationKind TSK =
+ (TemplateSpecializationKind)Record.readInt();
+ SourceLocation POI = ReadSourceLocation();
MemberSpecializationInfo *MSI = new (C) MemberSpecializationInfo(RD, TSK);
MSI->setPointOfInstantiation(POI);
D->TemplateOrInstantiation = MSI;
@@ -1739,7 +1753,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
}
}
- bool WasDefinition = Record[Idx++];
+ bool WasDefinition = Record.readInt();
if (WasDefinition)
ReadCXXRecordDefinition(D, /*Update*/false);
else
@@ -1749,7 +1763,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
// Lazily load the key function to avoid deserializing every method so we can
// compute it.
if (WasDefinition) {
- DeclID KeyFn = ReadDeclID(Record, Idx);
+ DeclID KeyFn = ReadDeclID();
if (KeyFn && D->IsCompleteDefinition)
// FIXME: This is wrong for the ARM ABI, where some other module may have
// made this function no longer be a key function. We need an update
@@ -1763,18 +1777,18 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
VisitFunctionDecl(D);
- unsigned NumOverridenMethods = Record[Idx++];
+ unsigned NumOverridenMethods = Record.readInt();
if (D->isCanonicalDecl()) {
while (NumOverridenMethods--) {
// Avoid invariant checking of CXXMethodDecl::addOverriddenMethod;
// MD may still be initializing.
- if (CXXMethodDecl *MD = ReadDeclAs<CXXMethodDecl>(Record, Idx))
+ if (CXXMethodDecl *MD = ReadDeclAs<CXXMethodDecl>())
Reader.getContext().addOverriddenMethod(D, MD->getCanonicalDecl());
}
} else {
// We don't care about which declarations this used to override; we get
// the relevant information from the canonical declaration.
- Idx += NumOverridenMethods;
+ Record.skipInts(NumOverridenMethods);
}
}
@@ -1782,21 +1796,21 @@ void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
// We need the inherited constructor information to merge the declaration,
// so we have to read it before we call VisitCXXMethodDecl.
if (D->isInheritingConstructor()) {
- auto *Shadow = ReadDeclAs<ConstructorUsingShadowDecl>(Record, Idx);
- auto *Ctor = ReadDeclAs<CXXConstructorDecl>(Record, Idx);
+ auto *Shadow = ReadDeclAs<ConstructorUsingShadowDecl>();
+ auto *Ctor = ReadDeclAs<CXXConstructorDecl>();
*D->getTrailingObjects<InheritedConstructor>() =
InheritedConstructor(Shadow, Ctor);
}
VisitCXXMethodDecl(D);
- D->IsExplicitSpecified = Record[Idx++];
+ D->IsExplicitSpecified = Record.readInt();
}
void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
VisitCXXMethodDecl(D);
- if (auto *OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx)) {
+ if (auto *OperatorDelete = ReadDeclAs<FunctionDecl>()) {
auto *Canon = cast<CXXDestructorDecl>(D->getCanonicalDecl());
// FIXME: Check consistency if we have an old and new operator delete.
if (!Canon->OperatorDelete)
@@ -1806,65 +1820,64 @@ void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
VisitCXXMethodDecl(D);
- D->IsExplicitSpecified = Record[Idx++];
+ D->IsExplicitSpecified = Record.readInt();
}
void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
VisitDecl(D);
- D->ImportedAndComplete.setPointer(readModule(Record, Idx));
- D->ImportedAndComplete.setInt(Record[Idx++]);
+ D->ImportedAndComplete.setPointer(readModule());
+ D->ImportedAndComplete.setInt(Record.readInt());
SourceLocation *StoredLocs = D->getTrailingObjects<SourceLocation>();
for (unsigned I = 0, N = Record.back(); I != N; ++I)
- StoredLocs[I] = ReadSourceLocation(Record, Idx);
- ++Idx; // The number of stored source locations.
+ StoredLocs[I] = ReadSourceLocation();
+ (void)Record.readInt(); // The number of stored source locations.
}
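VisitImportDecl is unusual in taking its count from the back of the record rather than from the cursor: the number of identifier source locations is stored last so that ReadDeclRecord can size the trailing array via Record.back() when it calls ImportDecl::CreateDeserialized (see the DECL_IMPORT case near the end of this patch), and the final readInt() here merely consumes that trailing count. Schematically:

    // ImportDecl record layout (schematic):
    //   [ ...decl fields..., Loc_1, ..., Loc_N, N ]
    // ReadDeclRecord:  peeks N = Record.back() to allocate N trailing slots.
    // VisitImportDecl: reads Loc_1..Loc_N, then consumes the trailing N.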
void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
VisitDecl(D);
- D->setColonLoc(ReadSourceLocation(Record, Idx));
+ D->setColonLoc(ReadSourceLocation());
}
void ASTDeclReader::VisitFriendDecl(FriendDecl *D) {
VisitDecl(D);
- if (Record[Idx++]) // hasFriendDecl
- D->Friend = ReadDeclAs<NamedDecl>(Record, Idx);
+ if (Record.readInt()) // hasFriendDecl
+ D->Friend = ReadDeclAs<NamedDecl>();
else
- D->Friend = GetTypeSourceInfo(Record, Idx);
+ D->Friend = GetTypeSourceInfo();
for (unsigned i = 0; i != D->NumTPLists; ++i)
D->getTrailingObjects<TemplateParameterList *>()[i] =
- Reader.ReadTemplateParameterList(F, Record, Idx);
- D->NextFriend = ReadDeclID(Record, Idx);
- D->UnsupportedFriend = (Record[Idx++] != 0);
- D->FriendLoc = ReadSourceLocation(Record, Idx);
+ Record.readTemplateParameterList();
+ D->NextFriend = ReadDeclID();
+ D->UnsupportedFriend = (Record.readInt() != 0);
+ D->FriendLoc = ReadSourceLocation();
}
void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
VisitDecl(D);
- unsigned NumParams = Record[Idx++];
+ unsigned NumParams = Record.readInt();
D->NumParams = NumParams;
D->Params = new TemplateParameterList*[NumParams];
for (unsigned i = 0; i != NumParams; ++i)
- D->Params[i] = Reader.ReadTemplateParameterList(F, Record, Idx);
- if (Record[Idx++]) // HasFriendDecl
- D->Friend = ReadDeclAs<NamedDecl>(Record, Idx);
+ D->Params[i] = Record.readTemplateParameterList();
+ if (Record.readInt()) // HasFriendDecl
+ D->Friend = ReadDeclAs<NamedDecl>();
else
- D->Friend = GetTypeSourceInfo(Record, Idx);
- D->FriendLoc = ReadSourceLocation(Record, Idx);
+ D->Friend = GetTypeSourceInfo();
+ D->FriendLoc = ReadSourceLocation();
}
DeclID ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
VisitNamedDecl(D);
- DeclID PatternID = ReadDeclID(Record, Idx);
+ DeclID PatternID = ReadDeclID();
NamedDecl *TemplatedDecl = cast_or_null<NamedDecl>(Reader.GetDecl(PatternID));
- TemplateParameterList* TemplateParams
- = Reader.ReadTemplateParameterList(F, Record, Idx);
+ TemplateParameterList *TemplateParams = Record.readTemplateParameterList();
D->init(TemplatedDecl, TemplateParams);
return PatternID;
}
-ASTDeclReader::RedeclarableResult
+ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
RedeclarableResult Redecl = VisitRedeclarable(D);
@@ -1881,17 +1894,17 @@ ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
// for the 'common' pointer.
if (ThisDeclID == Redecl.getFirstID()) {
if (RedeclarableTemplateDecl *RTD
- = ReadDeclAs<RedeclarableTemplateDecl>(Record, Idx)) {
+ = ReadDeclAs<RedeclarableTemplateDecl>()) {
assert(RTD->getKind() == D->getKind() &&
"InstantiatedFromMemberTemplate kind mismatch");
D->setInstantiatedFromMemberTemplate(RTD);
- if (Record[Idx++])
+ if (Record.readInt())
D->setMemberSpecialization();
}
}
DeclID PatternID = VisitTemplateDecl(D);
- D->IdentifierNamespace = Record[Idx++];
+ D->IdentifierNamespace = Record.readInt();
mergeRedeclarable(D, Redecl, PatternID);
@@ -1971,14 +1984,14 @@ ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
ClassTemplateSpecializationDecl *D) {
RedeclarableResult Redecl = VisitCXXRecordDeclImpl(D);
-
+
ASTContext &C = Reader.getContext();
- if (Decl *InstD = ReadDecl(Record, Idx)) {
+ if (Decl *InstD = ReadDecl()) {
if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(InstD)) {
D->SpecializedTemplate = CTD;
} else {
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ Record.readTemplateArgumentList(TemplArgs);
TemplateArgumentList *ArgList
= TemplateArgumentList::CreateCopy(C, TemplArgs);
ClassTemplateSpecializationDecl::SpecializedPartialSpecialization *PS
@@ -1992,15 +2005,14 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
}
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
- /*Canonicalize*/ true);
+ Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true);
D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs);
- D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
- D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++];
+ D->PointOfInstantiation = ReadSourceLocation();
+ D->SpecializationKind = (TemplateSpecializationKind)Record.readInt();
- bool writtenAsCanonicalDecl = Record[Idx++];
+ bool writtenAsCanonicalDecl = Record.readInt();
if (writtenAsCanonicalDecl) {
- ClassTemplateDecl *CanonPattern = ReadDeclAs<ClassTemplateDecl>(Record,Idx);
+ ClassTemplateDecl *CanonPattern = ReadDeclAs<ClassTemplateDecl>();
if (D->isCanonicalDecl()) { // It's kept in the folding set.
// Set this as, or find, the canonical declaration for this specialization
ClassTemplateSpecializationDecl *CanonSpec;
@@ -2030,12 +2042,12 @@ ASTDeclReader::VisitClassTemplateSpecializationDeclImpl(
}
// Explicit info.
- if (TypeSourceInfo *TyInfo = GetTypeSourceInfo(Record, Idx)) {
+ if (TypeSourceInfo *TyInfo = GetTypeSourceInfo()) {
ClassTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo
= new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo;
ExplicitInfo->TypeAsWritten = TyInfo;
- ExplicitInfo->ExternLoc = ReadSourceLocation(Record, Idx);
- ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(Record, Idx);
+ ExplicitInfo->ExternLoc = ReadSourceLocation();
+ ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation();
D->ExplicitInfo = ExplicitInfo;
}
@@ -2046,21 +2058,21 @@ void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl(
ClassTemplatePartialSpecializationDecl *D) {
RedeclarableResult Redecl = VisitClassTemplateSpecializationDeclImpl(D);
- D->TemplateParams = Reader.ReadTemplateParameterList(F, Record, Idx);
- D->ArgsAsWritten = Reader.ReadASTTemplateArgumentListInfo(F, Record, Idx);
+ D->TemplateParams = Record.readTemplateParameterList();
+ D->ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
// These are read/set from/to the first declaration.
if (ThisDeclID == Redecl.getFirstID()) {
D->InstantiatedFromMember.setPointer(
- ReadDeclAs<ClassTemplatePartialSpecializationDecl>(Record, Idx));
- D->InstantiatedFromMember.setInt(Record[Idx++]);
+ ReadDeclAs<ClassTemplatePartialSpecializationDecl>());
+ D->InstantiatedFromMember.setInt(Record.readInt());
}
}
void ASTDeclReader::VisitClassScopeFunctionSpecializationDecl(
ClassScopeFunctionSpecializationDecl *D) {
VisitDecl(D);
- D->Specialization = ReadDeclAs<CXXMethodDecl>(Record, Idx);
+ D->Specialization = ReadDeclAs<CXXMethodDecl>();
}
void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
@@ -2090,12 +2102,12 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
RedeclarableResult Redecl = VisitVarDeclImpl(D);
ASTContext &C = Reader.getContext();
- if (Decl *InstD = ReadDecl(Record, Idx)) {
+ if (Decl *InstD = ReadDecl()) {
if (VarTemplateDecl *VTD = dyn_cast<VarTemplateDecl>(InstD)) {
D->SpecializedTemplate = VTD;
} else {
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ Record.readTemplateArgumentList(TemplArgs);
TemplateArgumentList *ArgList = TemplateArgumentList::CreateCopy(
C, TemplArgs);
VarTemplateSpecializationDecl::SpecializedPartialSpecialization *PS =
@@ -2109,25 +2121,24 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
}
// Explicit info.
- if (TypeSourceInfo *TyInfo = GetTypeSourceInfo(Record, Idx)) {
+ if (TypeSourceInfo *TyInfo = GetTypeSourceInfo()) {
VarTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo =
new (C) VarTemplateSpecializationDecl::ExplicitSpecializationInfo;
ExplicitInfo->TypeAsWritten = TyInfo;
- ExplicitInfo->ExternLoc = ReadSourceLocation(Record, Idx);
- ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(Record, Idx);
+ ExplicitInfo->ExternLoc = ReadSourceLocation();
+ ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation();
D->ExplicitInfo = ExplicitInfo;
}
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx,
- /*Canonicalize*/ true);
+ Record.readTemplateArgumentList(TemplArgs, /*Canonicalize*/ true);
D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs);
- D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
- D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++];
+ D->PointOfInstantiation = ReadSourceLocation();
+ D->SpecializationKind = (TemplateSpecializationKind)Record.readInt();
- bool writtenAsCanonicalDecl = Record[Idx++];
+ bool writtenAsCanonicalDecl = Record.readInt();
if (writtenAsCanonicalDecl) {
- VarTemplateDecl *CanonPattern = ReadDeclAs<VarTemplateDecl>(Record, Idx);
+ VarTemplateDecl *CanonPattern = ReadDeclAs<VarTemplateDecl>();
if (D->isCanonicalDecl()) { // It's kept in the folding set.
// FIXME: If it's already present, merge it.
if (VarTemplatePartialSpecializationDecl *Partial =
@@ -2152,63 +2163,63 @@ void ASTDeclReader::VisitVarTemplatePartialSpecializationDecl(
VarTemplatePartialSpecializationDecl *D) {
RedeclarableResult Redecl = VisitVarTemplateSpecializationDeclImpl(D);
- D->TemplateParams = Reader.ReadTemplateParameterList(F, Record, Idx);
- D->ArgsAsWritten = Reader.ReadASTTemplateArgumentListInfo(F, Record, Idx);
+ D->TemplateParams = Record.readTemplateParameterList();
+ D->ArgsAsWritten = Record.readASTTemplateArgumentListInfo();
// These are read/set from/to the first declaration.
if (ThisDeclID == Redecl.getFirstID()) {
D->InstantiatedFromMember.setPointer(
- ReadDeclAs<VarTemplatePartialSpecializationDecl>(Record, Idx));
- D->InstantiatedFromMember.setInt(Record[Idx++]);
+ ReadDeclAs<VarTemplatePartialSpecializationDecl>());
+ D->InstantiatedFromMember.setInt(Record.readInt());
}
}
void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
VisitTypeDecl(D);
- D->setDeclaredWithTypename(Record[Idx++]);
+ D->setDeclaredWithTypename(Record.readInt());
- if (Record[Idx++])
- D->setDefaultArgument(GetTypeSourceInfo(Record, Idx));
+ if (Record.readInt())
+ D->setDefaultArgument(GetTypeSourceInfo());
}
void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
VisitDeclaratorDecl(D);
// TemplateParmPosition.
- D->setDepth(Record[Idx++]);
- D->setPosition(Record[Idx++]);
+ D->setDepth(Record.readInt());
+ D->setPosition(Record.readInt());
if (D->isExpandedParameterPack()) {
auto TypesAndInfos =
D->getTrailingObjects<std::pair<QualType, TypeSourceInfo *>>();
for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
- new (&TypesAndInfos[I].first) QualType(Reader.readType(F, Record, Idx));
- TypesAndInfos[I].second = GetTypeSourceInfo(Record, Idx);
+ new (&TypesAndInfos[I].first) QualType(Record.readType());
+ TypesAndInfos[I].second = GetTypeSourceInfo();
}
} else {
// Rest of NonTypeTemplateParmDecl.
- D->ParameterPack = Record[Idx++];
- if (Record[Idx++])
- D->setDefaultArgument(Reader.ReadExpr(F));
+ D->ParameterPack = Record.readInt();
+ if (Record.readInt())
+ D->setDefaultArgument(Record.readExpr());
}
}
void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
VisitTemplateDecl(D);
// TemplateParmPosition.
- D->setDepth(Record[Idx++]);
- D->setPosition(Record[Idx++]);
+ D->setDepth(Record.readInt());
+ D->setPosition(Record.readInt());
if (D->isExpandedParameterPack()) {
TemplateParameterList **Data =
D->getTrailingObjects<TemplateParameterList *>();
for (unsigned I = 0, N = D->getNumExpansionTemplateParameters();
I != N; ++I)
- Data[I] = Reader.ReadTemplateParameterList(F, Record, Idx);
+ Data[I] = Record.readTemplateParameterList();
} else {
// Rest of TemplateTemplateParmDecl.
- D->ParameterPack = Record[Idx++];
- if (Record[Idx++])
+ D->ParameterPack = Record.readInt();
+ if (Record.readInt())
D->setDefaultArgument(Reader.getContext(),
- Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ Record.readTemplateArgumentLoc());
}
}
@@ -2218,10 +2229,10 @@ void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
void ASTDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) {
VisitDecl(D);
- D->AssertExprAndFailed.setPointer(Reader.ReadExpr(F));
- D->AssertExprAndFailed.setInt(Record[Idx++]);
- D->Message = cast_or_null<StringLiteral>(Reader.ReadExpr(F));
- D->RParenLoc = ReadSourceLocation(Record, Idx);
+ D->AssertExprAndFailed.setPointer(Record.readExpr());
+ D->AssertExprAndFailed.setInt(Record.readInt());
+ D->Message = cast_or_null<StringLiteral>(Record.readExpr());
+ D->RParenLoc = ReadSourceLocation();
}
void ASTDeclReader::VisitEmptyDecl(EmptyDecl *D) {
@@ -2230,15 +2241,15 @@ void ASTDeclReader::VisitEmptyDecl(EmptyDecl *D) {
std::pair<uint64_t, uint64_t>
ASTDeclReader::VisitDeclContext(DeclContext *DC) {
- uint64_t LexicalOffset = ReadLocalOffset(Record, Idx);
- uint64_t VisibleOffset = ReadLocalOffset(Record, Idx);
+ uint64_t LexicalOffset = ReadLocalOffset();
+ uint64_t VisibleOffset = ReadLocalOffset();
return std::make_pair(LexicalOffset, VisibleOffset);
}
template <typename T>
ASTDeclReader::RedeclarableResult
ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
- DeclID FirstDeclID = ReadDeclID(Record, Idx);
+ DeclID FirstDeclID = ReadDeclID();
Decl *MergeWith = nullptr;
bool IsKeyDecl = ThisDeclID == FirstDeclID;
@@ -2252,7 +2263,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
FirstDeclID = ThisDeclID;
IsKeyDecl = true;
IsFirstLocalDecl = true;
- } else if (unsigned N = Record[Idx++]) {
+ } else if (unsigned N = Record.readInt()) {
// This declaration was the first local declaration, but may have imported
// other declarations.
IsKeyDecl = N == 1;
@@ -2264,13 +2275,13 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
// FIXME: Provide a known merge target to the second and subsequent such
// declaration.
for (unsigned I = 0; I != N - 1; ++I)
- MergeWith = ReadDecl(Record, Idx/*, MergeWith*/);
+ MergeWith = ReadDecl();
- RedeclOffset = ReadLocalOffset(Record, Idx);
+ RedeclOffset = ReadLocalOffset();
} else {
// This declaration was not the first local declaration. Read the first
// local declaration now, to trigger the import of other redeclarations.
- (void)ReadDecl(Record, Idx);
+ (void)ReadDecl();
}
T *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID));
@@ -2281,7 +2292,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
// loaded & attached later on.
D->RedeclLink = Redeclarable<T>::PreviousDeclLink(FirstDecl);
D->First = FirstDecl->getCanonicalDecl();
- }
+ }
T *DAsT = static_cast<T*>(D);
@@ -2292,7 +2303,7 @@ ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
if (IsFirstLocalDecl)
Reader.PendingDeclChains.push_back(std::make_pair(DAsT, RedeclOffset));
- return RedeclarableResult(FirstDeclID, MergeWith, IsKeyDecl);
+ return RedeclarableResult(MergeWith, FirstDeclID, IsKeyDecl);
}
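The unsigned N read in the middle branch of VisitRedeclarable does double duty, which the surrounding comments only partly spell out. A decode summary of the logic above (no new behavior):

    // N == 0 : not the first local declaration; reading the first declaration
    //          triggers import of the rest of the chain.
    // N == 1 : first local declaration and the key declaration;
    //          RedeclOffset follows immediately.
    // N >  1 : first local declaration, but N-1 imported declarations to
    //          merge with are read first, then RedeclOffset.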
/// \brief Attempts to merge the given declaration (D) with another declaration
@@ -2301,8 +2312,6 @@ template<typename T>
void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
RedeclarableResult &Redecl,
DeclID TemplatePatternID) {
- T *D = static_cast<T*>(DBase);
-
// If modules are not available, there is no reason to perform this merge.
if (!Reader.getContext().getLangOpts().Modules)
return;
@@ -2311,6 +2320,8 @@ void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *DBase,
if (!DBase->isFirstDecl())
return;
+ T *D = static_cast<T*>(DBase);
+
if (auto *Existing = Redecl.getKnownMergeTarget())
// We already know of an existing declaration we should merge with.
mergeRedeclarable(D, cast<T>(Existing), Redecl, TemplatePatternID);
@@ -2334,8 +2345,9 @@ void ASTDeclReader::mergeTemplatePattern(RedeclarableTemplateDecl *D,
DeclID DsID, bool IsKeyDecl) {
auto *DPattern = D->getTemplatedDecl();
auto *ExistingPattern = Existing->getTemplatedDecl();
- RedeclarableResult Result(DPattern->getCanonicalDecl()->getGlobalID(),
- /*MergeWith*/ ExistingPattern, IsKeyDecl);
+ RedeclarableResult Result(/*MergeWith*/ ExistingPattern,
+ DPattern->getCanonicalDecl()->getGlobalID(),
+ IsKeyDecl);
if (auto *DClass = dyn_cast<CXXRecordDecl>(DPattern)) {
// Merge with any existing definition.
@@ -2436,17 +2448,17 @@ void ASTDeclReader::VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D) {
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i) {
- Vars.push_back(Reader.ReadExpr(F));
+ Vars.push_back(Record.readExpr());
}
D->setVars(Vars);
}
void ASTDeclReader::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
VisitValueDecl(D);
- D->setLocation(Reader.ReadSourceLocation(F, Record, Idx));
- D->setCombiner(Reader.ReadExpr(F));
- D->setInitializer(Reader.ReadExpr(F));
- D->PrevDeclInScope = Reader.ReadDeclID(F, Record, Idx);
+ D->setLocation(ReadSourceLocation());
+ D->setCombiner(Record.readExpr());
+ D->setInitializer(Record.readExpr());
+ D->PrevDeclInScope = ReadDeclID();
}
void ASTDeclReader::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
@@ -2494,10 +2506,16 @@ inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) {
/// This routine should return true for anything that might affect
/// code generation, e.g., inline function definitions, Objective-C
/// declarations with metadata, etc.
-static bool isConsumerInterestedIn(Decl *D, bool HasBody) {
+static bool isConsumerInterestedIn(ASTContext &Ctx, Decl *D, bool HasBody) {
// An ObjCMethodDecl is never considered as "interesting" because its
// implementation container always is.
+ // An ImportDecl or VarDecl imported from a module will get emitted when
+ // we import the relevant module.
+ if ((isa<ImportDecl>(D) || isa<VarDecl>(D)) && Ctx.DeclMustBeEmitted(D) &&
+ D->getImportedOwningModule())
+ return false;
+
if (isa<FileScopeAsmDecl>(D) ||
isa<ObjCProtocolDecl>(D) ||
isa<ObjCImplDecl>(D) ||
@@ -2867,7 +2885,7 @@ static NamedDecl *getDeclForMerging(NamedDecl *Found,
return nullptr;
if (auto *TND = dyn_cast<TypedefNameDecl>(Found))
- return TND->getAnonDeclWithTypedefName();
+ return TND->getAnonDeclWithTypedefName(/*AnyRedecl*/true);
return nullptr;
}
@@ -3043,6 +3061,29 @@ void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
namespace clang {
template<>
void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
+ Redeclarable<VarDecl> *D,
+ Decl *Previous, Decl *Canon) {
+ VarDecl *VD = static_cast<VarDecl*>(D);
+ VarDecl *PrevVD = cast<VarDecl>(Previous);
+ D->RedeclLink.setPrevious(PrevVD);
+ D->First = PrevVD->First;
+
+ // We should keep at most one definition on the chain.
+ // FIXME: Cache the definition once we've found it. Building a chain with
+ // N definitions currently takes O(N^2) time here.
+ if (VD->isThisDeclarationADefinition() == VarDecl::Definition) {
+ for (VarDecl *CurD = PrevVD; CurD; CurD = CurD->getPreviousDecl()) {
+ if (CurD->isThisDeclarationADefinition() == VarDecl::Definition) {
+ Reader.mergeDefinitionVisibility(CurD, VD);
+ VD->demoteThisDefinitionToDeclaration();
+ break;
+ }
+ }
+ }
+}
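The new specialization above enforces the invariant that a redeclaration chain exposes at most one VarDecl definition: a newly attached definition yields to an older one, contributing only its visibility. The quadratic cost flagged in the FIXME comes from re-walking the chain on every attach; caching the discovered definition (for example on the canonical declaration, a member that does not exist in this patch) would make attachment linear. The invariant, restated:

    // Before attach: VD->isThisDeclarationADefinition() == VarDecl::Definition,
    //                and some CurD earlier on the chain is also a Definition.
    // After attach:  VD has been demoted via demoteThisDefinitionToDeclaration(),
    //                so the chain exposes exactly one Definition (CurD's).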
+
+template<>
+void ASTDeclReader::attachPreviousDeclImpl(ASTReader &Reader,
Redeclarable<FunctionDecl> *D,
Decl *Previous, Decl *Canon) {
FunctionDecl *FD = static_cast<FunctionDecl*>(D);
@@ -3215,13 +3256,12 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
Deserializing ADecl(this);
DeclsCursor.JumpToBit(Loc.Offset);
- RecordData Record;
+ ASTRecordReader Record(*this, *Loc.F);
+ ASTDeclReader Reader(*this, Record, Loc, ID, DeclLoc);
unsigned Code = DeclsCursor.ReadCode();
- unsigned Idx = 0;
- ASTDeclReader Reader(*this, Loc, ID, DeclLoc, Record,Idx);
Decl *D = nullptr;
- switch ((DeclCode)DeclsCursor.readRecord(Code, Record)) {
+ switch ((DeclCode)Record.readRecord(DeclsCursor, Code)) {
case DECL_CONTEXT_LEXICAL:
case DECL_CONTEXT_VISIBLE:
llvm_unreachable("Record cannot be de-serialized with ReadDeclRecord");
@@ -3246,6 +3286,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_LINKAGE_SPEC:
D = LinkageSpecDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_EXPORT:
+ D = ExportDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_LABEL:
D = LabelDecl::CreateDeserialized(Context, ID);
break;
@@ -3258,6 +3301,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_USING:
D = UsingDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_USING_PACK:
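+ // The trailing integer is the number of expansions in the pack, needed
+ // here to allocate the node's trailing storage.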
+ D = UsingPackDecl::CreateDeserialized(Context, ID, Record.readInt());
+ break;
case DECL_USING_SHADOW:
D = UsingShadowDecl::CreateDeserialized(Context, ID);
break;
@@ -3295,7 +3341,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
D = AccessSpecDecl::CreateDeserialized(Context, ID);
break;
case DECL_FRIEND:
- D = FriendDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ D = FriendDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
case DECL_FRIEND_TEMPLATE:
D = FriendTemplateDecl::CreateDeserialized(Context, ID);
@@ -3331,14 +3377,15 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID);
break;
case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK:
- D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID,
+ Record.readInt());
break;
case DECL_TEMPLATE_TEMPLATE_PARM:
D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID);
break;
case DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK:
D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID,
- Record[Idx++]);
+ Record.readInt());
break;
case DECL_TYPE_ALIAS_TEMPLATE:
D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID);
@@ -3394,6 +3441,12 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_PARM_VAR:
D = ParmVarDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_DECOMPOSITION:
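+ // The trailing integer is the number of bindings in the decomposition.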
+ D = DecompositionDecl::CreateDeserialized(Context, ID, Record.readInt());
+ break;
+ case DECL_BINDING:
+ D = BindingDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_FILE_SCOPE_ASM:
D = FileScopeAsmDecl::CreateDeserialized(Context, ID);
break;
@@ -3404,7 +3457,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
D = MSPropertyDecl::CreateDeserialized(Context, ID);
break;
case DECL_CAPTURED:
- D = CapturedDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ D = CapturedDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
case DECL_CXX_BASE_SPECIFIERS:
Error("attempt to read a C++ base-specifier record as a declaration");
@@ -3418,7 +3471,7 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
D = ImportDecl::CreateDeserialized(Context, ID, Record.back());
break;
case DECL_OMP_THREADPRIVATE:
- D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ D = OMPThreadPrivateDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
case DECL_OMP_DECLARE_REDUCTION:
D = OMPDeclareReductionDecl::CreateDeserialized(Context, ID);
@@ -3427,11 +3480,11 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
D = OMPCapturedExprDecl::CreateDeserialized(Context, ID);
break;
case DECL_PRAGMA_COMMENT:
- D = PragmaCommentDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ D = PragmaCommentDecl::CreateDeserialized(Context, ID, Record.readInt());
break;
case DECL_PRAGMA_DETECT_MISMATCH:
D = PragmaDetectMismatchDecl::CreateDeserialized(Context, ID,
- Record[Idx++]);
+ Record.readInt());
break;
case DECL_EMPTY:
D = EmptyDecl::CreateDeserialized(Context, ID);
@@ -3460,21 +3513,24 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
ReadVisibleDeclContextStorage(*Loc.F, DeclsCursor, Offsets.second, ID))
return nullptr;
}
- assert(Idx == Record.size());
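+ // The reader must have consumed the entire record by this point.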
+ assert(Record.getIdx() == Record.size());
// Load any relevant update records.
PendingUpdateRecords.push_back(std::make_pair(ID, D));
// Load the categories after recursive loading is finished.
if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(D))
- if (Class->isThisDeclarationADefinition())
+ // If we already have a definition when deserializing the ObjCInterfaceDecl,
+ // we put the Decl in PendingDefinitions so that we can still pull in the
+ // categories here.
+ if (Class->isThisDeclarationADefinition() ||
+ PendingDefinitions.count(Class))
loadObjCCategories(ID, Class);
// If we have deserialized a declaration that has a definition the
// AST consumer might need to know about, queue it.
// We don't pass it to the consumer immediately because we may be in recursive
// loading, and some declarations may still be initializing.
- if (isConsumerInterestedIn(D, Reader.hasPendingBody()))
+ if (isConsumerInterestedIn(Context, D, Reader.hasPendingBody()))
InterestingDecls.push_back(D);
return D;
@@ -3490,28 +3546,27 @@ void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
auto UpdateOffsets = std::move(UpdI->second);
DeclUpdateOffsets.erase(UpdI);
- bool WasInteresting = isConsumerInterestedIn(D, false);
+ bool WasInteresting = isConsumerInterestedIn(Context, D, false);
for (auto &FileAndOffset : UpdateOffsets) {
ModuleFile *F = FileAndOffset.first;
uint64_t Offset = FileAndOffset.second;
llvm::BitstreamCursor &Cursor = F->DeclsCursor;
SavedStreamPosition SavedPosition(Cursor);
Cursor.JumpToBit(Offset);
- RecordData Record;
unsigned Code = Cursor.ReadCode();
- unsigned RecCode = Cursor.readRecord(Code, Record);
+ ASTRecordReader Record(*this, *F);
+ unsigned RecCode = Record.readRecord(Cursor, Code);
(void)RecCode;
assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
- unsigned Idx = 0;
- ASTDeclReader Reader(*this, RecordLocation(F, Offset), ID,
- SourceLocation(), Record, Idx);
- Reader.UpdateDecl(D, *F, Record);
+ ASTDeclReader Reader(*this, Record, RecordLocation(F, Offset), ID,
+ SourceLocation());
+ Reader.UpdateDecl(D);
// We might have made this declaration interesting. If so, remember that
// we need to hand it off to the consumer.
if (!WasInteresting &&
- isConsumerInterestedIn(D, Reader.hasPendingBody())) {
+ isConsumerInterestedIn(Context, D, Reader.hasPendingBody())) {
InterestingDecls.push_back(D);
WasInteresting = true;
}
@@ -3578,12 +3633,12 @@ namespace {
/// interface all the categories for it.
class ObjCCategoriesVisitor {
ASTReader &Reader;
- serialization::GlobalDeclID InterfaceID;
ObjCInterfaceDecl *Interface;
llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized;
- unsigned PreviousGeneration;
ObjCCategoryDecl *Tail;
llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
+ serialization::GlobalDeclID InterfaceID;
+ unsigned PreviousGeneration;
void add(ObjCCategoryDecl *Cat) {
// Only process each category once.
@@ -3626,13 +3681,13 @@ namespace {
public:
ObjCCategoriesVisitor(ASTReader &Reader,
- serialization::GlobalDeclID InterfaceID,
ObjCInterfaceDecl *Interface,
- llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
+ llvm::SmallPtrSetImpl<ObjCCategoryDecl *> &Deserialized,
+ serialization::GlobalDeclID InterfaceID,
unsigned PreviousGeneration)
- : Reader(Reader), InterfaceID(InterfaceID), Interface(Interface),
- Deserialized(Deserialized), PreviousGeneration(PreviousGeneration),
- Tail(nullptr)
+ : Reader(Reader), Interface(Interface), Deserialized(Deserialized),
+ Tail(nullptr), InterfaceID(InterfaceID),
+ PreviousGeneration(PreviousGeneration)
{
// Populate the name -> category map with the set of known categories.
for (auto *Cat : Interface->known_categories()) {
@@ -3687,7 +3742,7 @@ namespace {
void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
ObjCInterfaceDecl *D,
unsigned PreviousGeneration) {
- ObjCCategoriesVisitor Visitor(*this, ID, D, CategoriesDeserialized,
+ ObjCCategoriesVisitor Visitor(*this, D, CategoriesDeserialized, ID,
PreviousGeneration);
ModuleMgr.visit(Visitor);
}
@@ -3713,15 +3768,14 @@ static void forAllLaterRedecls(DeclT *D, Fn F) {
}
}
-void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
- const RecordData &Record) {
- while (Idx < Record.size()) {
- switch ((DeclUpdateKind)Record[Idx++]) {
+void ASTDeclReader::UpdateDecl(Decl *D) {
+ while (Record.getIdx() < Record.size()) {
+ switch ((DeclUpdateKind)Record.readInt()) {
case UPD_CXX_ADDED_IMPLICIT_MEMBER: {
auto *RD = cast<CXXRecordDecl>(D);
// FIXME: If we also have an update record for instantiating the
// definition of D, we need that to happen before we get here.
- Decl *MD = Reader.ReadDecl(ModuleFile, Record, Idx);
+ Decl *MD = Record.readDecl();
assert(MD && "couldn't read decl from update record");
// FIXME: We should call addHiddenDecl instead, to add the member
// to its DeclContext.
@@ -3731,18 +3785,16 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
// It will be added to the template's specializations set when loaded.
- (void)Reader.ReadDecl(ModuleFile, Record, Idx);
+ (void)Record.readDecl();
break;
case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE: {
- NamespaceDecl *Anon
- = Reader.ReadDeclAs<NamespaceDecl>(ModuleFile, Record, Idx);
-
+ NamespaceDecl *Anon = ReadDeclAs<NamespaceDecl>();
+
// Each module has its own anonymous namespace, which is disjoint from
// any other module's anonymous namespaces, so don't attach the anonymous
// namespace at all.
- if (ModuleFile.Kind != MK_ImplicitModule &&
- ModuleFile.Kind != MK_ExplicitModule) {
+ if (!Record.isModule()) {
if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(D))
TU->setAnonymousNamespace(Anon);
else
@@ -3753,7 +3805,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
cast<VarDecl>(D)->getMemberSpecializationInfo()->setPointOfInstantiation(
- Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+ ReadSourceLocation());
break;
case UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT: {
@@ -3762,7 +3814,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
// We have to read the default argument regardless of whether we use it
// so that hypothetical further update records aren't messed up.
// TODO: Add a function to skip over the next expr record.
- auto DefaultArg = Reader.ReadExpr(F);
+ auto *DefaultArg = Record.readExpr();
// Only apply the update if the parameter still has an uninstantiated
// default argument.
@@ -3771,6 +3823,23 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
break;
}
+ case UPD_CXX_INSTANTIATED_DEFAULT_MEMBER_INITIALIZER: {
+ auto *FD = cast<FieldDecl>(D);
+ auto *DefaultInit = Record.readExpr();
+
+ // Only apply the update if the field still has an uninstantiated
+ // default member initializer.
+ if (FD->hasInClassInitializer() && !FD->getInClassInitializer()) {
+ if (DefaultInit)
+ FD->setInClassInitializer(DefaultInit);
+ else
+ // Instantiation failed. We can get here if we serialized an AST for
+ // an invalid program.
+ FD->removeInClassInitializer();
+ }
+ break;
+ }
+
case UPD_CXX_ADDED_FUNCTION_DEFINITION: {
FunctionDecl *FD = cast<FunctionDecl>(D);
if (Reader.PendingBodies[FD]) {
@@ -3779,7 +3848,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
return;
}
- if (Record[Idx++]) {
+ if (Record.readInt()) {
// Maintain AST consistency: any later redeclarations of this function
// are inline if this one is. (We might have merged another declaration
// into this one.)
@@ -3787,16 +3856,16 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
FD->setImplicitlyInline();
});
}
- FD->setInnerLocStart(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+ FD->setInnerLocStart(ReadSourceLocation());
if (auto *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- CD->NumCtorInitializers = Record[Idx++];
+ CD->NumCtorInitializers = Record.readInt();
if (CD->NumCtorInitializers)
- CD->CtorInitializers = ReadGlobalOffset(F, Record, Idx);
+ CD->CtorInitializers = ReadGlobalOffset();
}
// Store the offset of the body so we can lazily load it later.
Reader.PendingBodies[FD] = GetCurrentCursorOffset();
HasPendingBody = true;
- assert(Idx == Record.size() && "lazy body must be last");
+ assert(Record.getIdx() == Record.size() && "lazy body must be last");
break;
}
@@ -3809,15 +3878,14 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
ReadCXXRecordDefinition(RD, /*Update*/true);
// Visible update is handled separately.
- uint64_t LexicalOffset = ReadLocalOffset(Record, Idx);
+ uint64_t LexicalOffset = ReadLocalOffset();
if (!HadRealDefinition && LexicalOffset) {
- Reader.ReadLexicalDeclContextStorage(ModuleFile, ModuleFile.DeclsCursor,
- LexicalOffset, RD);
+ Record.readLexicalDeclContextStorage(LexicalOffset, RD);
Reader.PendingFakeDefinitionData.erase(OldDD);
}
- auto TSK = (TemplateSpecializationKind)Record[Idx++];
- SourceLocation POI = Reader.ReadSourceLocation(ModuleFile, Record, Idx);
+ auto TSK = (TemplateSpecializationKind)Record.readInt();
+ SourceLocation POI = ReadSourceLocation();
if (MemberSpecializationInfo *MSInfo =
RD->getMemberSpecializationInfo()) {
MSInfo->setTemplateSpecializationKind(TSK);
@@ -3828,11 +3896,11 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
Spec->setTemplateSpecializationKind(TSK);
Spec->setPointOfInstantiation(POI);
- if (Record[Idx++]) {
+ if (Record.readInt()) {
auto PartialSpec =
- ReadDeclAs<ClassTemplatePartialSpecializationDecl>(Record, Idx);
+ ReadDeclAs<ClassTemplatePartialSpecializationDecl>();
SmallVector<TemplateArgument, 8> TemplArgs;
- Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ Record.readTemplateArgumentList(TemplArgs);
auto *TemplArgList = TemplateArgumentList::CreateCopy(
Reader.getContext(), TemplArgs);
@@ -3844,15 +3912,18 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
}
}
- RD->setTagKind((TagTypeKind)Record[Idx++]);
- RD->setLocation(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
- RD->setLocStart(Reader.ReadSourceLocation(ModuleFile, Record, Idx));
- RD->setBraceRange(Reader.ReadSourceRange(ModuleFile, Record, Idx));
+ RD->setTagKind((TagTypeKind)Record.readInt());
+ RD->setLocation(ReadSourceLocation());
+ RD->setLocStart(ReadSourceLocation());
+ RD->setBraceRange(ReadSourceRange());
- if (Record[Idx++]) {
+ if (Record.readInt()) {
AttrVec Attrs;
- Reader.ReadAttributes(F, Attrs, Record, Idx);
- D->setAttrsImpl(Attrs, Reader.getContext());
+ Record.readAttributes(Attrs);
+ // If the declaration already has attributes, we assume that some other
+ // AST file already loaded them.
+ if (!D->hasAttrs())
+ D->setAttrsImpl(Attrs, Reader.getContext());
}
break;
}
@@ -3860,7 +3931,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
case UPD_CXX_RESOLVED_DTOR_DELETE: {
// Set the 'operator delete' directly to avoid emitting another update
// record.
- auto *Del = Reader.ReadDeclAs<FunctionDecl>(ModuleFile, Record, Idx);
+ auto *Del = ReadDeclAs<FunctionDecl>();
auto *First = cast<CXXDestructorDecl>(D->getCanonicalDecl());
// FIXME: Check consistency if we have an old and new operator delete.
if (!First->OperatorDelete)
@@ -3871,7 +3942,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
case UPD_CXX_RESOLVED_EXCEPTION_SPEC: {
FunctionProtoType::ExceptionSpecInfo ESI;
SmallVector<QualType, 8> ExceptionStorage;
- Reader.readExceptionSpec(ModuleFile, ExceptionStorage, ESI, Record, Idx);
+ Record.readExceptionSpec(ExceptionStorage, ESI);
// Update this declaration's exception specification, if needed.
auto *FD = cast<FunctionDecl>(D);
@@ -3893,7 +3964,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
case UPD_CXX_DEDUCED_RETURN_TYPE: {
// FIXME: Also do this when merging redecls.
- QualType DeducedResultType = Reader.readType(ModuleFile, Record, Idx);
+ QualType DeducedResultType = Record.readType();
for (auto *Redecl : merged_redecls(D)) {
// FIXME: If the return type is already deduced, check that it matches.
FunctionDecl *FD = cast<FunctionDecl>(Redecl);
@@ -3909,20 +3980,20 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
}
case UPD_MANGLING_NUMBER:
- Reader.Context.setManglingNumber(cast<NamedDecl>(D), Record[Idx++]);
+ Reader.Context.setManglingNumber(cast<NamedDecl>(D), Record.readInt());
break;
case UPD_STATIC_LOCAL_NUMBER:
- Reader.Context.setStaticLocalNumber(cast<VarDecl>(D), Record[Idx++]);
+ Reader.Context.setStaticLocalNumber(cast<VarDecl>(D), Record.readInt());
break;
case UPD_DECL_MARKED_OPENMP_THREADPRIVATE:
D->addAttr(OMPThreadPrivateDeclAttr::CreateImplicit(
- Reader.Context, ReadSourceRange(Record, Idx)));
+ Reader.Context, ReadSourceRange()));
break;
case UPD_DECL_EXPORTED: {
- unsigned SubmoduleID = readSubmoduleID(Record, Idx);
+ unsigned SubmoduleID = readSubmoduleID();
auto *Exported = cast<NamedDecl>(D);
if (auto *TD = dyn_cast<TagDecl>(Exported))
Exported = TD->getDefinition();
@@ -3946,7 +4017,7 @@ void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
case UPD_DECL_MARKED_OPENMP_DECLARETARGET:
case UPD_ADDED_ATTR_TO_RECORD:
AttrVec Attrs;
- Reader.ReadAttributes(F, Attrs, Record, Idx);
+ Record.readAttributes(Attrs);
assert(Attrs.size() == 1);
D->addAttr(Attrs[0]);
break;
diff --git a/lib/Serialization/ASTReaderInternals.h b/lib/Serialization/ASTReaderInternals.h
index d392364a971b..6cb4d662e338 100644
--- a/lib/Serialization/ASTReaderInternals.h
+++ b/lib/Serialization/ASTReaderInternals.h
@@ -13,14 +13,12 @@
#ifndef LLVM_CLANG_LIB_SERIALIZATION_ASTREADERINTERNALS_H
#define LLVM_CLANG_LIB_SERIALIZATION_ASTREADERINTERNALS_H
+#include "MultiOnDiskHashTable.h"
#include "clang/AST/DeclarationName.h"
#include "clang/Serialization/ASTBitCodes.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/OnDiskHashTable.h"
-#include "MultiOnDiskHashTable.h"
#include <utility>
namespace clang {
@@ -112,17 +110,6 @@ public:
struct DeclContextLookupTable {
MultiOnDiskHashTable<ASTDeclContextNameLookupTrait> Table;
-
- // These look redundant, but don't remove them -- they work around MSVC 2013's
- // inability to synthesize move operations. Without them, the
- // MultiOnDiskHashTable will be copied (despite being move-only!).
- DeclContextLookupTable() : Table() {}
- DeclContextLookupTable(DeclContextLookupTable &&O)
- : Table(std::move(O.Table)) {}
- DeclContextLookupTable &operator=(DeclContextLookupTable &&O) {
- Table = std::move(O.Table);
- return *this;
- }
};
/// \brief Base class for the trait describing the on-disk hash table for the
@@ -259,7 +246,7 @@ public:
struct internal_key_type {
off_t Size;
time_t ModTime;
- const char *Filename;
+ StringRef Filename;
bool Imported;
};
typedef const internal_key_type &internal_key_ref;
diff --git a/lib/Serialization/ASTReaderStmt.cpp b/lib/Serialization/ASTReaderStmt.cpp
index 395da42d4f24..5607f764a9c3 100644
--- a/lib/Serialization/ASTReaderStmt.cpp
+++ b/lib/Serialization/ASTReaderStmt.cpp
@@ -26,62 +26,47 @@ namespace clang {
class ASTStmtReader : public StmtVisitor<ASTStmtReader> {
friend class OMPClauseReader;
- typedef ASTReader::RecordData RecordData;
-
- ASTReader &Reader;
- ModuleFile &F;
+
+ ASTRecordReader &Record;
llvm::BitstreamCursor &DeclsCursor;
- const ASTReader::RecordData &Record;
- unsigned &Idx;
- Token ReadToken(const RecordData &R, unsigned &I) {
- return Reader.ReadToken(F, R, I);
+ SourceLocation ReadSourceLocation() {
+ return Record.readSourceLocation();
}
- SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
- return Reader.ReadSourceLocation(F, R, I);
+ SourceRange ReadSourceRange() {
+ return Record.readSourceRange();
}
- SourceRange ReadSourceRange(const RecordData &R, unsigned &I) {
- return Reader.ReadSourceRange(F, R, I);
+ std::string ReadString() {
+ return Record.readString();
}
- std::string ReadString(const RecordData &R, unsigned &I) {
- return Reader.ReadString(R, I);
- }
-
- TypeSourceInfo *GetTypeSourceInfo(const RecordData &R, unsigned &I) {
- return Reader.GetTypeSourceInfo(F, R, I);
- }
-
- serialization::DeclID ReadDeclID(const RecordData &R, unsigned &I) {
- return Reader.ReadDeclID(F, R, I);
+ TypeSourceInfo *GetTypeSourceInfo() {
+ return Record.getTypeSourceInfo();
}
-
- Decl *ReadDecl(const RecordData &R, unsigned &I) {
- return Reader.ReadDecl(F, R, I);
+
+ Decl *ReadDecl() {
+ return Record.readDecl();
}
-
+
template<typename T>
- T *ReadDeclAs(const RecordData &R, unsigned &I) {
- return Reader.ReadDeclAs<T>(F, R, I);
+ T *ReadDeclAs() {
+ return Record.readDeclAs<T>();
}
- void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
- const ASTReader::RecordData &R, unsigned &I) {
- Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc,
+ DeclarationName Name) {
+ Record.readDeclarationNameLoc(DNLoc, Name);
}
-
- void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
- const ASTReader::RecordData &R, unsigned &I) {
- Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+
+ void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo) {
+ Record.readDeclarationNameInfo(NameInfo);
}
public:
- ASTStmtReader(ASTReader &Reader, ModuleFile &F,
- llvm::BitstreamCursor &Cursor,
- const ASTReader::RecordData &Record, unsigned &Idx)
- : Reader(Reader), F(F), DeclsCursor(Cursor), Record(Record), Idx(Idx) { }
+ ASTStmtReader(ASTRecordReader &Record, llvm::BitstreamCursor &Cursor)
+ : Record(Record), DeclsCursor(Cursor) {}
/// \brief The number of record fields required for the Stmt class
/// itself.
@@ -109,106 +94,103 @@ namespace clang {
void ASTStmtReader::ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
TemplateArgumentLoc *ArgsLocArray,
unsigned NumTemplateArgs) {
- SourceLocation TemplateKWLoc = ReadSourceLocation(Record, Idx);
+ SourceLocation TemplateKWLoc = ReadSourceLocation();
TemplateArgumentListInfo ArgInfo;
- ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
- ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ ArgInfo.setLAngleLoc(ReadSourceLocation());
+ ArgInfo.setRAngleLoc(ReadSourceLocation());
for (unsigned i = 0; i != NumTemplateArgs; ++i)
- ArgInfo.addArgument(
- Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ ArgInfo.addArgument(Record.readTemplateArgumentLoc());
Args.initializeFrom(TemplateKWLoc, ArgInfo, ArgsLocArray);
}
void ASTStmtReader::VisitStmt(Stmt *S) {
- assert(Idx == NumStmtFields && "Incorrect statement field count");
+ assert(Record.getIdx() == NumStmtFields && "Incorrect statement field count");
}
void ASTStmtReader::VisitNullStmt(NullStmt *S) {
VisitStmt(S);
- S->setSemiLoc(ReadSourceLocation(Record, Idx));
- S->HasLeadingEmptyMacro = Record[Idx++];
+ S->setSemiLoc(ReadSourceLocation());
+ S->HasLeadingEmptyMacro = Record.readInt();
}
void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
VisitStmt(S);
SmallVector<Stmt *, 16> Stmts;
- unsigned NumStmts = Record[Idx++];
+ unsigned NumStmts = Record.readInt();
while (NumStmts--)
- Stmts.push_back(Reader.ReadSubStmt());
- S->setStmts(Reader.getContext(), Stmts);
- S->LBraceLoc = ReadSourceLocation(Record, Idx);
- S->RBraceLoc = ReadSourceLocation(Record, Idx);
+ Stmts.push_back(Record.readSubStmt());
+ S->setStmts(Record.getContext(), Stmts);
+ S->LBraceLoc = ReadSourceLocation();
+ S->RBraceLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitSwitchCase(SwitchCase *S) {
VisitStmt(S);
- Reader.RecordSwitchCaseID(S, Record[Idx++]);
- S->setKeywordLoc(ReadSourceLocation(Record, Idx));
- S->setColonLoc(ReadSourceLocation(Record, Idx));
+ Record.recordSwitchCaseID(S, Record.readInt());
+ S->setKeywordLoc(ReadSourceLocation());
+ S->setColonLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitCaseStmt(CaseStmt *S) {
VisitSwitchCase(S);
- S->setLHS(Reader.ReadSubExpr());
- S->setRHS(Reader.ReadSubExpr());
- S->setSubStmt(Reader.ReadSubStmt());
- S->setEllipsisLoc(ReadSourceLocation(Record, Idx));
+ S->setLHS(Record.readSubExpr());
+ S->setRHS(Record.readSubExpr());
+ S->setSubStmt(Record.readSubStmt());
+ S->setEllipsisLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
VisitSwitchCase(S);
- S->setSubStmt(Reader.ReadSubStmt());
+ S->setSubStmt(Record.readSubStmt());
}
void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
- LabelDecl *LD = ReadDeclAs<LabelDecl>(Record, Idx);
+ LabelDecl *LD = ReadDeclAs<LabelDecl>();
LD->setStmt(S);
S->setDecl(LD);
- S->setSubStmt(Reader.ReadSubStmt());
- S->setIdentLoc(ReadSourceLocation(Record, Idx));
+ S->setSubStmt(Record.readSubStmt());
+ S->setIdentLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitAttributedStmt(AttributedStmt *S) {
VisitStmt(S);
- uint64_t NumAttrs = Record[Idx++];
+ uint64_t NumAttrs = Record.readInt();
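+ // NumAttrs is used only to validate the count of deserialized attributes
+ // in the asserts below.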
AttrVec Attrs;
- Reader.ReadAttributes(F, Attrs, Record, Idx);
+ Record.readAttributes(Attrs);
(void)NumAttrs;
assert(NumAttrs == S->NumAttrs);
assert(NumAttrs == Attrs.size());
std::copy(Attrs.begin(), Attrs.end(), S->getAttrArrayPtr());
- S->SubStmt = Reader.ReadSubStmt();
- S->AttrLoc = ReadSourceLocation(Record, Idx);
+ S->SubStmt = Record.readSubStmt();
+ S->AttrLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitIfStmt(IfStmt *S) {
VisitStmt(S);
- S->setConstexpr(Record[Idx++]);
- S->setInit(Reader.ReadSubStmt());
- S->setConditionVariable(Reader.getContext(),
- ReadDeclAs<VarDecl>(Record, Idx));
- S->setCond(Reader.ReadSubExpr());
- S->setThen(Reader.ReadSubStmt());
- S->setElse(Reader.ReadSubStmt());
- S->setIfLoc(ReadSourceLocation(Record, Idx));
- S->setElseLoc(ReadSourceLocation(Record, Idx));
+ S->setConstexpr(Record.readInt());
+ S->setInit(Record.readSubStmt());
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+ S->setCond(Record.readSubExpr());
+ S->setThen(Record.readSubStmt());
+ S->setElse(Record.readSubStmt());
+ S->setIfLoc(ReadSourceLocation());
+ S->setElseLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
VisitStmt(S);
- S->setInit(Reader.ReadSubStmt());
- S->setConditionVariable(Reader.getContext(),
- ReadDeclAs<VarDecl>(Record, Idx));
- S->setCond(Reader.ReadSubExpr());
- S->setBody(Reader.ReadSubStmt());
- S->setSwitchLoc(ReadSourceLocation(Record, Idx));
- if (Record[Idx++])
+ S->setInit(Record.readSubStmt());
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+ S->setCond(Record.readSubExpr());
+ S->setBody(Record.readSubStmt());
+ S->setSwitchLoc(ReadSourceLocation());
+ if (Record.readInt())
S->setAllEnumCasesCovered();
SwitchCase *PrevSC = nullptr;
- for (unsigned N = Record.size(); Idx != N; ++Idx) {
- SwitchCase *SC = Reader.getSwitchCaseWithID(Record[Idx]);
+ for (auto E = Record.size(); Record.getIdx() != E; ) {
+ SwitchCase *SC = Record.getSwitchCaseWithID(Record.readInt());
if (PrevSC)
PrevSC->setNextSwitchCase(SC);
else
@@ -220,81 +202,80 @@ void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
VisitStmt(S);
- S->setConditionVariable(Reader.getContext(),
- ReadDeclAs<VarDecl>(Record, Idx));
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
- S->setCond(Reader.ReadSubExpr());
- S->setBody(Reader.ReadSubStmt());
- S->setWhileLoc(ReadSourceLocation(Record, Idx));
+ S->setCond(Record.readSubExpr());
+ S->setBody(Record.readSubStmt());
+ S->setWhileLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitDoStmt(DoStmt *S) {
VisitStmt(S);
- S->setCond(Reader.ReadSubExpr());
- S->setBody(Reader.ReadSubStmt());
- S->setDoLoc(ReadSourceLocation(Record, Idx));
- S->setWhileLoc(ReadSourceLocation(Record, Idx));
- S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->setCond(Record.readSubExpr());
+ S->setBody(Record.readSubStmt());
+ S->setDoLoc(ReadSourceLocation());
+ S->setWhileLoc(ReadSourceLocation());
+ S->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitForStmt(ForStmt *S) {
VisitStmt(S);
- S->setInit(Reader.ReadSubStmt());
- S->setCond(Reader.ReadSubExpr());
- S->setConditionVariable(Reader.getContext(),
- ReadDeclAs<VarDecl>(Record, Idx));
- S->setInc(Reader.ReadSubExpr());
- S->setBody(Reader.ReadSubStmt());
- S->setForLoc(ReadSourceLocation(Record, Idx));
- S->setLParenLoc(ReadSourceLocation(Record, Idx));
- S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->setInit(Record.readSubStmt());
+ S->setCond(Record.readSubExpr());
+ S->setConditionVariable(Record.getContext(), ReadDeclAs<VarDecl>());
+ S->setInc(Record.readSubExpr());
+ S->setBody(Record.readSubStmt());
+ S->setForLoc(ReadSourceLocation());
+ S->setLParenLoc(ReadSourceLocation());
+ S->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitGotoStmt(GotoStmt *S) {
VisitStmt(S);
- S->setLabel(ReadDeclAs<LabelDecl>(Record, Idx));
- S->setGotoLoc(ReadSourceLocation(Record, Idx));
- S->setLabelLoc(ReadSourceLocation(Record, Idx));
+ S->setLabel(ReadDeclAs<LabelDecl>());
+ S->setGotoLoc(ReadSourceLocation());
+ S->setLabelLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
VisitStmt(S);
- S->setGotoLoc(ReadSourceLocation(Record, Idx));
- S->setStarLoc(ReadSourceLocation(Record, Idx));
- S->setTarget(Reader.ReadSubExpr());
+ S->setGotoLoc(ReadSourceLocation());
+ S->setStarLoc(ReadSourceLocation());
+ S->setTarget(Record.readSubExpr());
}
void ASTStmtReader::VisitContinueStmt(ContinueStmt *S) {
VisitStmt(S);
- S->setContinueLoc(ReadSourceLocation(Record, Idx));
+ S->setContinueLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitBreakStmt(BreakStmt *S) {
VisitStmt(S);
- S->setBreakLoc(ReadSourceLocation(Record, Idx));
+ S->setBreakLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitReturnStmt(ReturnStmt *S) {
VisitStmt(S);
- S->setRetValue(Reader.ReadSubExpr());
- S->setReturnLoc(ReadSourceLocation(Record, Idx));
- S->setNRVOCandidate(ReadDeclAs<VarDecl>(Record, Idx));
+ S->setRetValue(Record.readSubExpr());
+ S->setReturnLoc(ReadSourceLocation());
+ S->setNRVOCandidate(ReadDeclAs<VarDecl>());
}
void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
VisitStmt(S);
- S->setStartLoc(ReadSourceLocation(Record, Idx));
- S->setEndLoc(ReadSourceLocation(Record, Idx));
+ S->setStartLoc(ReadSourceLocation());
+ S->setEndLoc(ReadSourceLocation());
- if (Idx + 1 == Record.size()) {
+ if (Record.size() - Record.getIdx() == 1) {
// Single declaration
- S->setDeclGroup(DeclGroupRef(ReadDecl(Record, Idx)));
+ S->setDeclGroup(DeclGroupRef(ReadDecl()));
} else {
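+ // Multiple declarations: every remaining record entry is a decl ID for
+ // the group.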
SmallVector<Decl *, 16> Decls;
- Decls.reserve(Record.size() - Idx);
- for (unsigned N = Record.size(); Idx != N; )
- Decls.push_back(ReadDecl(Record, Idx));
- S->setDeclGroup(DeclGroupRef(DeclGroup::Create(Reader.getContext(),
+ int N = Record.size() - Record.getIdx();
+ Decls.reserve(N);
+ for (int I = 0; I < N; ++I)
+ Decls.push_back(ReadDecl());
+ S->setDeclGroup(DeclGroupRef(DeclGroup::Create(Record.getContext(),
Decls.data(),
Decls.size())));
}
@@ -302,18 +283,18 @@ void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
VisitStmt(S);
- S->NumOutputs = Record[Idx++];
- S->NumInputs = Record[Idx++];
- S->NumClobbers = Record[Idx++];
- S->setAsmLoc(ReadSourceLocation(Record, Idx));
- S->setVolatile(Record[Idx++]);
- S->setSimple(Record[Idx++]);
+ S->NumOutputs = Record.readInt();
+ S->NumInputs = Record.readInt();
+ S->NumClobbers = Record.readInt();
+ S->setAsmLoc(ReadSourceLocation());
+ S->setVolatile(Record.readInt());
+ S->setSimple(Record.readInt());
}
void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
VisitAsmStmt(S);
- S->setRParenLoc(ReadSourceLocation(Record, Idx));
- S->setAsmString(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+ S->setRParenLoc(ReadSourceLocation());
+ S->setAsmString(cast_or_null<StringLiteral>(Record.readSubStmt()));
unsigned NumOutputs = S->getNumOutputs();
unsigned NumInputs = S->getNumInputs();
@@ -324,34 +305,34 @@ void ASTStmtReader::VisitGCCAsmStmt(GCCAsmStmt *S) {
SmallVector<StringLiteral*, 16> Constraints;
SmallVector<Stmt*, 16> Exprs;
for (unsigned I = 0, N = NumOutputs + NumInputs; I != N; ++I) {
- Names.push_back(Reader.GetIdentifierInfo(F, Record, Idx));
- Constraints.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
- Exprs.push_back(Reader.ReadSubStmt());
+ Names.push_back(Record.getIdentifierInfo());
+ Constraints.push_back(cast_or_null<StringLiteral>(Record.readSubStmt()));
+ Exprs.push_back(Record.readSubStmt());
}
// Clobbers
SmallVector<StringLiteral*, 16> Clobbers;
for (unsigned I = 0; I != NumClobbers; ++I)
- Clobbers.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+ Clobbers.push_back(cast_or_null<StringLiteral>(Record.readSubStmt()));
- S->setOutputsAndInputsAndClobbers(Reader.getContext(),
- Names.data(), Constraints.data(),
- Exprs.data(), NumOutputs, NumInputs,
+ S->setOutputsAndInputsAndClobbers(Record.getContext(),
+ Names.data(), Constraints.data(),
+ Exprs.data(), NumOutputs, NumInputs,
Clobbers.data(), NumClobbers);
}
void ASTStmtReader::VisitMSAsmStmt(MSAsmStmt *S) {
VisitAsmStmt(S);
- S->LBraceLoc = ReadSourceLocation(Record, Idx);
- S->EndLoc = ReadSourceLocation(Record, Idx);
- S->NumAsmToks = Record[Idx++];
- std::string AsmStr = ReadString(Record, Idx);
+ S->LBraceLoc = ReadSourceLocation();
+ S->EndLoc = ReadSourceLocation();
+ S->NumAsmToks = Record.readInt();
+ std::string AsmStr = ReadString();
// Read the tokens.
SmallVector<Token, 16> AsmToks;
AsmToks.reserve(S->NumAsmToks);
for (unsigned i = 0, e = S->NumAsmToks; i != e; ++i) {
- AsmToks.push_back(ReadToken(Record, Idx));
+ AsmToks.push_back(Record.readToken());
}
// The calls to reserve() for the FooData vectors are mandatory to
@@ -363,7 +344,7 @@ void ASTStmtReader::VisitMSAsmStmt(MSAsmStmt *S) {
ClobbersData.reserve(S->NumClobbers);
Clobbers.reserve(S->NumClobbers);
for (unsigned i = 0, e = S->NumClobbers; i != e; ++i) {
- ClobbersData.push_back(ReadString(Record, Idx));
+ ClobbersData.push_back(ReadString());
Clobbers.push_back(ClobbersData.back());
}
@@ -376,12 +357,12 @@ void ASTStmtReader::VisitMSAsmStmt(MSAsmStmt *S) {
ConstraintsData.reserve(NumOperands);
Constraints.reserve(NumOperands);
for (unsigned i = 0; i != NumOperands; ++i) {
- Exprs.push_back(cast<Expr>(Reader.ReadSubStmt()));
- ConstraintsData.push_back(ReadString(Record, Idx));
+ Exprs.push_back(cast<Expr>(Record.readSubStmt()));
+ ConstraintsData.push_back(ReadString());
Constraints.push_back(ConstraintsData.back());
}
- S->initialize(Reader.getContext(), AsmStr, AsmToks,
+ S->initialize(Record.getContext(), AsmStr, AsmToks,
Constraints, Exprs, Clobbers);
}
@@ -407,229 +388,229 @@ void ASTStmtReader::VisitCoyieldExpr(CoyieldExpr *S) {
void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
VisitStmt(S);
- ++Idx;
- S->setCapturedDecl(ReadDeclAs<CapturedDecl>(Record, Idx));
- S->setCapturedRegionKind(static_cast<CapturedRegionKind>(Record[Idx++]));
- S->setCapturedRecordDecl(ReadDeclAs<RecordDecl>(Record, Idx));
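+ // Skip the leading integer (presumably the capture count, which was
+ // already consumed when the statement was allocated).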
+ Record.skipInts(1);
+ S->setCapturedDecl(ReadDeclAs<CapturedDecl>());
+ S->setCapturedRegionKind(static_cast<CapturedRegionKind>(Record.readInt()));
+ S->setCapturedRecordDecl(ReadDeclAs<RecordDecl>());
// Capture inits
for (CapturedStmt::capture_init_iterator I = S->capture_init_begin(),
E = S->capture_init_end();
I != E; ++I)
- *I = Reader.ReadSubExpr();
+ *I = Record.readSubExpr();
// Body
- S->setCapturedStmt(Reader.ReadSubStmt());
+ S->setCapturedStmt(Record.readSubStmt());
S->getCapturedDecl()->setBody(S->getCapturedStmt());
// Captures
for (auto &I : S->captures()) {
- I.VarAndKind.setPointer(ReadDeclAs<VarDecl>(Record, Idx));
- I.VarAndKind
- .setInt(static_cast<CapturedStmt::VariableCaptureKind>(Record[Idx++]));
- I.Loc = ReadSourceLocation(Record, Idx);
+ I.VarAndKind.setPointer(ReadDeclAs<VarDecl>());
+ I.VarAndKind.setInt(
+ static_cast<CapturedStmt::VariableCaptureKind>(Record.readInt()));
+ I.Loc = ReadSourceLocation();
}
}
void ASTStmtReader::VisitExpr(Expr *E) {
VisitStmt(E);
- E->setType(Reader.readType(F, Record, Idx));
- E->setTypeDependent(Record[Idx++]);
- E->setValueDependent(Record[Idx++]);
- E->setInstantiationDependent(Record[Idx++]);
- E->ExprBits.ContainsUnexpandedParameterPack = Record[Idx++];
- E->setValueKind(static_cast<ExprValueKind>(Record[Idx++]));
- E->setObjectKind(static_cast<ExprObjectKind>(Record[Idx++]));
- assert(Idx == NumExprFields && "Incorrect expression field count");
+ E->setType(Record.readType());
+ E->setTypeDependent(Record.readInt());
+ E->setValueDependent(Record.readInt());
+ E->setInstantiationDependent(Record.readInt());
+ E->ExprBits.ContainsUnexpandedParameterPack = Record.readInt();
+ E->setValueKind(static_cast<ExprValueKind>(Record.readInt()));
+ E->setObjectKind(static_cast<ExprObjectKind>(Record.readInt()));
+ assert(Record.getIdx() == NumExprFields &&
+ "Incorrect expression field count");
}
void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->Type = (PredefinedExpr::IdentType)Record[Idx++];
- E->FnName = cast_or_null<StringLiteral>(Reader.ReadSubExpr());
+ E->setLocation(ReadSourceLocation());
+ E->Type = (PredefinedExpr::IdentType)Record.readInt();
+ E->FnName = cast_or_null<StringLiteral>(Record.readSubExpr());
}
void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
VisitExpr(E);
- E->DeclRefExprBits.HasQualifier = Record[Idx++];
- E->DeclRefExprBits.HasFoundDecl = Record[Idx++];
- E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record[Idx++];
- E->DeclRefExprBits.HadMultipleCandidates = Record[Idx++];
- E->DeclRefExprBits.RefersToEnclosingVariableOrCapture = Record[Idx++];
+ E->DeclRefExprBits.HasQualifier = Record.readInt();
+ E->DeclRefExprBits.HasFoundDecl = Record.readInt();
+ E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record.readInt();
+ E->DeclRefExprBits.HadMultipleCandidates = Record.readInt();
+ E->DeclRefExprBits.RefersToEnclosingVariableOrCapture = Record.readInt();
unsigned NumTemplateArgs = 0;
if (E->hasTemplateKWAndArgsInfo())
- NumTemplateArgs = Record[Idx++];
+ NumTemplateArgs = Record.readInt();
if (E->hasQualifier())
new (E->getTrailingObjects<NestedNameSpecifierLoc>())
- NestedNameSpecifierLoc(
- Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+ NestedNameSpecifierLoc(Record.readNestedNameSpecifierLoc());
if (E->hasFoundDecl())
- *E->getTrailingObjects<NamedDecl *>() = ReadDeclAs<NamedDecl>(Record, Idx);
+ *E->getTrailingObjects<NamedDecl *>() = ReadDeclAs<NamedDecl>();
if (E->hasTemplateKWAndArgsInfo())
ReadTemplateKWAndArgsInfo(
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
E->getTrailingObjects<TemplateArgumentLoc>(), NumTemplateArgs);
- E->setDecl(ReadDeclAs<ValueDecl>(Record, Idx));
- E->setLocation(ReadSourceLocation(Record, Idx));
- ReadDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName(), Record, Idx);
+ E->setDecl(ReadDeclAs<ValueDecl>());
+ E->setLocation(ReadSourceLocation());
+ ReadDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName());
}
void ASTStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
VisitExpr(E);
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->setValue(Reader.getContext(), Reader.ReadAPInt(Record, Idx));
+ E->setLocation(ReadSourceLocation());
+ E->setValue(Record.getContext(), Record.readAPInt());
}
void ASTStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
VisitExpr(E);
- E->setRawSemantics(static_cast<Stmt::APFloatSemantics>(Record[Idx++]));
- E->setExact(Record[Idx++]);
- E->setValue(Reader.getContext(),
- Reader.ReadAPFloat(Record, E->getSemantics(), Idx));
- E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setRawSemantics(static_cast<Stmt::APFloatSemantics>(Record.readInt()));
+ E->setExact(Record.readInt());
+ E->setValue(Record.getContext(), Record.readAPFloat(E->getSemantics()));
+ E->setLocation(ReadSourceLocation());
}
void ASTStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
VisitExpr(E);
- E->setSubExpr(Reader.ReadSubExpr());
+ E->setSubExpr(Record.readSubExpr());
}
void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
VisitExpr(E);
- unsigned Len = Record[Idx++];
- assert(Record[Idx] == E->getNumConcatenated() &&
+ unsigned Len = Record.readInt();
+ assert(Record.peekInt() == E->getNumConcatenated() &&
"Wrong number of concatenated tokens!");
- ++Idx;
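+ // The concatenated-token count was only peeked at above; skip past it now.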
+ Record.skipInts(1);
StringLiteral::StringKind kind =
- static_cast<StringLiteral::StringKind>(Record[Idx++]);
- bool isPascal = Record[Idx++];
+ static_cast<StringLiteral::StringKind>(Record.readInt());
+ bool isPascal = Record.readInt();
// Read string data
- SmallString<16> Str(&Record[Idx], &Record[Idx] + Len);
- E->setString(Reader.getContext(), Str, kind, isPascal);
- Idx += Len;
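+ // The string's characters are stored one per record entry; copy out Len
+ // of them, then advance past them.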
+ auto B = &Record.peekInt();
+ SmallString<16> Str(B, B + Len);
+ E->setString(Record.getContext(), Str, kind, isPascal);
+ Record.skipInts(Len);
// Read source locations
for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
- E->setStrTokenLoc(I, ReadSourceLocation(Record, Idx));
+ E->setStrTokenLoc(I, ReadSourceLocation());
}
void ASTStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
VisitExpr(E);
- E->setValue(Record[Idx++]);
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->setKind(static_cast<CharacterLiteral::CharacterKind>(Record[Idx++]));
+ E->setValue(Record.readInt());
+ E->setLocation(ReadSourceLocation());
+ E->setKind(static_cast<CharacterLiteral::CharacterKind>(Record.readInt()));
}
void ASTStmtReader::VisitParenExpr(ParenExpr *E) {
VisitExpr(E);
- E->setLParen(ReadSourceLocation(Record, Idx));
- E->setRParen(ReadSourceLocation(Record, Idx));
- E->setSubExpr(Reader.ReadSubExpr());
+ E->setLParen(ReadSourceLocation());
+ E->setRParen(ReadSourceLocation());
+ E->setSubExpr(Record.readSubExpr());
}
void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
VisitExpr(E);
- unsigned NumExprs = Record[Idx++];
- E->Exprs = new (Reader.getContext()) Stmt*[NumExprs];
+ unsigned NumExprs = Record.readInt();
+ E->Exprs = new (Record.getContext()) Stmt*[NumExprs];
for (unsigned i = 0; i != NumExprs; ++i)
- E->Exprs[i] = Reader.ReadSubStmt();
+ E->Exprs[i] = Record.readSubStmt();
E->NumExprs = NumExprs;
- E->LParenLoc = ReadSourceLocation(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->LParenLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
VisitExpr(E);
- E->setSubExpr(Reader.ReadSubExpr());
- E->setOpcode((UnaryOperator::Opcode)Record[Idx++]);
- E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setSubExpr(Record.readSubExpr());
+ E->setOpcode((UnaryOperator::Opcode)Record.readInt());
+ E->setOperatorLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
VisitExpr(E);
- assert(E->getNumComponents() == Record[Idx]);
- ++Idx;
- assert(E->getNumExpressions() == Record[Idx]);
- ++Idx;
- E->setOperatorLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
- E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ assert(E->getNumComponents() == Record.peekInt());
+ Record.skipInts(1);
+ assert(E->getNumExpressions() == Record.peekInt());
+ Record.skipInts(1);
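+ // Both counts were fixed when the node was allocated; they are only
+ // re-checked by the asserts above.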
+ E->setOperatorLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
+ E->setTypeSourceInfo(GetTypeSourceInfo());
for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
- OffsetOfNode::Kind Kind = static_cast<OffsetOfNode::Kind>(Record[Idx++]);
- SourceLocation Start = ReadSourceLocation(Record, Idx);
- SourceLocation End = ReadSourceLocation(Record, Idx);
+ OffsetOfNode::Kind Kind = static_cast<OffsetOfNode::Kind>(Record.readInt());
+ SourceLocation Start = ReadSourceLocation();
+ SourceLocation End = ReadSourceLocation();
switch (Kind) {
case OffsetOfNode::Array:
- E->setComponent(I, OffsetOfNode(Start, Record[Idx++], End));
+ E->setComponent(I, OffsetOfNode(Start, Record.readInt(), End));
break;
case OffsetOfNode::Field:
E->setComponent(
- I, OffsetOfNode(Start, ReadDeclAs<FieldDecl>(Record, Idx), End));
+ I, OffsetOfNode(Start, ReadDeclAs<FieldDecl>(), End));
break;
case OffsetOfNode::Identifier:
E->setComponent(
I,
- OffsetOfNode(Start, Reader.GetIdentifierInfo(F, Record, Idx), End));
+ OffsetOfNode(Start, Record.getIdentifierInfo(), End));
break;
case OffsetOfNode::Base: {
- CXXBaseSpecifier *Base = new (Reader.getContext()) CXXBaseSpecifier();
- *Base = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
+ CXXBaseSpecifier *Base = new (Record.getContext()) CXXBaseSpecifier();
+ *Base = Record.readCXXBaseSpecifier();
E->setComponent(I, OffsetOfNode(Base));
break;
}
}
}
-
+
for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I)
- E->setIndexExpr(I, Reader.ReadSubExpr());
+ E->setIndexExpr(I, Record.readSubExpr());
}
void ASTStmtReader::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
VisitExpr(E);
- E->setKind(static_cast<UnaryExprOrTypeTrait>(Record[Idx++]));
- if (Record[Idx] == 0) {
- E->setArgument(Reader.ReadSubExpr());
- ++Idx;
+ E->setKind(static_cast<UnaryExprOrTypeTrait>(Record.readInt()));
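+ // A zero sentinel means the argument is an expression; otherwise it is a
+ // type, written as a TypeSourceInfo.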
+ if (Record.peekInt() == 0) {
+ E->setArgument(Record.readSubExpr());
+ Record.skipInts(1);
} else {
- E->setArgument(GetTypeSourceInfo(Record, Idx));
+ E->setArgument(GetTypeSourceInfo());
}
- E->setOperatorLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setOperatorLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
VisitExpr(E);
- E->setLHS(Reader.ReadSubExpr());
- E->setRHS(Reader.ReadSubExpr());
- E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+ E->setLHS(Record.readSubExpr());
+ E->setRHS(Record.readSubExpr());
+ E->setRBracketLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitOMPArraySectionExpr(OMPArraySectionExpr *E) {
VisitExpr(E);
- E->setBase(Reader.ReadSubExpr());
- E->setLowerBound(Reader.ReadSubExpr());
- E->setLength(Reader.ReadSubExpr());
- E->setColonLoc(ReadSourceLocation(Record, Idx));
- E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+ E->setBase(Record.readSubExpr());
+ E->setLowerBound(Record.readSubExpr());
+ E->setLength(Record.readSubExpr());
+ E->setColonLoc(ReadSourceLocation());
+ E->setRBracketLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitCallExpr(CallExpr *E) {
VisitExpr(E);
- E->setNumArgs(Reader.getContext(), Record[Idx++]);
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
- E->setCallee(Reader.ReadSubExpr());
+ E->setNumArgs(Record.getContext(), Record.readInt());
+ E->setRParenLoc(ReadSourceLocation());
+ E->setCallee(Record.readSubExpr());
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- E->setArg(I, Reader.ReadSubExpr());
+ E->setArg(I, Record.readSubExpr());
}
void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
@@ -644,74 +625,74 @@ void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
void ASTStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
VisitExpr(E);
- E->setBase(Reader.ReadSubExpr());
- E->setIsaMemberLoc(ReadSourceLocation(Record, Idx));
- E->setOpLoc(ReadSourceLocation(Record, Idx));
- E->setArrow(Record[Idx++]);
+ E->setBase(Record.readSubExpr());
+ E->setIsaMemberLoc(ReadSourceLocation());
+ E->setOpLoc(ReadSourceLocation());
+ E->setArrow(Record.readInt());
}
void ASTStmtReader::
VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
VisitExpr(E);
- E->Operand = Reader.ReadSubExpr();
- E->setShouldCopy(Record[Idx++]);
+ E->Operand = Record.readSubExpr();
+ E->setShouldCopy(Record.readInt());
}
void ASTStmtReader::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
VisitExplicitCastExpr(E);
- E->LParenLoc = ReadSourceLocation(Record, Idx);
- E->BridgeKeywordLoc = ReadSourceLocation(Record, Idx);
- E->Kind = Record[Idx++];
+ E->LParenLoc = ReadSourceLocation();
+ E->BridgeKeywordLoc = ReadSourceLocation();
+ E->Kind = Record.readInt();
}
void ASTStmtReader::VisitCastExpr(CastExpr *E) {
VisitExpr(E);
- unsigned NumBaseSpecs = Record[Idx++];
+ unsigned NumBaseSpecs = Record.readInt();
assert(NumBaseSpecs == E->path_size());
- E->setSubExpr(Reader.ReadSubExpr());
- E->setCastKind((CastKind)Record[Idx++]);
+ E->setSubExpr(Record.readSubExpr());
+ E->setCastKind((CastKind)Record.readInt());
CastExpr::path_iterator BaseI = E->path_begin();
while (NumBaseSpecs--) {
- CXXBaseSpecifier *BaseSpec = new (Reader.getContext()) CXXBaseSpecifier;
- *BaseSpec = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
+ CXXBaseSpecifier *BaseSpec = new (Record.getContext()) CXXBaseSpecifier;
+ *BaseSpec = Record.readCXXBaseSpecifier();
*BaseI++ = BaseSpec;
}
}
void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
VisitExpr(E);
- E->setLHS(Reader.ReadSubExpr());
- E->setRHS(Reader.ReadSubExpr());
- E->setOpcode((BinaryOperator::Opcode)Record[Idx++]);
- E->setOperatorLoc(ReadSourceLocation(Record, Idx));
- E->setFPContractable((bool)Record[Idx++]);
+ E->setLHS(Record.readSubExpr());
+ E->setRHS(Record.readSubExpr());
+ E->setOpcode((BinaryOperator::Opcode)Record.readInt());
+ E->setOperatorLoc(ReadSourceLocation());
+ E->setFPContractable((bool)Record.readInt());
}
void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
VisitBinaryOperator(E);
- E->setComputationLHSType(Reader.readType(F, Record, Idx));
- E->setComputationResultType(Reader.readType(F, Record, Idx));
+ E->setComputationLHSType(Record.readType());
+ E->setComputationResultType(Record.readType());
}
void ASTStmtReader::VisitConditionalOperator(ConditionalOperator *E) {
VisitExpr(E);
- E->SubExprs[ConditionalOperator::COND] = Reader.ReadSubExpr();
- E->SubExprs[ConditionalOperator::LHS] = Reader.ReadSubExpr();
- E->SubExprs[ConditionalOperator::RHS] = Reader.ReadSubExpr();
- E->QuestionLoc = ReadSourceLocation(Record, Idx);
- E->ColonLoc = ReadSourceLocation(Record, Idx);
+ E->SubExprs[ConditionalOperator::COND] = Record.readSubExpr();
+ E->SubExprs[ConditionalOperator::LHS] = Record.readSubExpr();
+ E->SubExprs[ConditionalOperator::RHS] = Record.readSubExpr();
+ E->QuestionLoc = ReadSourceLocation();
+ E->ColonLoc = ReadSourceLocation();
}
void
ASTStmtReader::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
VisitExpr(E);
- E->OpaqueValue = cast<OpaqueValueExpr>(Reader.ReadSubExpr());
- E->SubExprs[BinaryConditionalOperator::COMMON] = Reader.ReadSubExpr();
- E->SubExprs[BinaryConditionalOperator::COND] = Reader.ReadSubExpr();
- E->SubExprs[BinaryConditionalOperator::LHS] = Reader.ReadSubExpr();
- E->SubExprs[BinaryConditionalOperator::RHS] = Reader.ReadSubExpr();
- E->QuestionLoc = ReadSourceLocation(Record, Idx);
- E->ColonLoc = ReadSourceLocation(Record, Idx);
+ E->OpaqueValue = cast<OpaqueValueExpr>(Record.readSubExpr());
+ E->SubExprs[BinaryConditionalOperator::COMMON] = Record.readSubExpr();
+ E->SubExprs[BinaryConditionalOperator::COND] = Record.readSubExpr();
+ E->SubExprs[BinaryConditionalOperator::LHS] = Record.readSubExpr();
+ E->SubExprs[BinaryConditionalOperator::RHS] = Record.readSubExpr();
+ E->QuestionLoc = ReadSourceLocation();
+ E->ColonLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
@@ -720,54 +701,54 @@ void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
void ASTStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
VisitCastExpr(E);
- E->setTypeInfoAsWritten(GetTypeSourceInfo(Record, Idx));
+ E->setTypeInfoAsWritten(GetTypeSourceInfo());
}
void ASTStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) {
VisitExplicitCastExpr(E);
- E->setLParenLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setLParenLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
VisitExpr(E);
- E->setLParenLoc(ReadSourceLocation(Record, Idx));
- E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
- E->setInitializer(Reader.ReadSubExpr());
- E->setFileScope(Record[Idx++]);
+ E->setLParenLoc(ReadSourceLocation());
+ E->setTypeSourceInfo(GetTypeSourceInfo());
+ E->setInitializer(Record.readSubExpr());
+ E->setFileScope(Record.readInt());
}
void ASTStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
VisitExpr(E);
- E->setBase(Reader.ReadSubExpr());
- E->setAccessor(Reader.GetIdentifierInfo(F, Record, Idx));
- E->setAccessorLoc(ReadSourceLocation(Record, Idx));
+ E->setBase(Record.readSubExpr());
+ E->setAccessor(Record.getIdentifierInfo());
+ E->setAccessorLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
VisitExpr(E);
- if (InitListExpr *SyntForm = cast_or_null<InitListExpr>(Reader.ReadSubStmt()))
+ if (InitListExpr *SyntForm = cast_or_null<InitListExpr>(Record.readSubStmt()))
E->setSyntacticForm(SyntForm);
- E->setLBraceLoc(ReadSourceLocation(Record, Idx));
- E->setRBraceLoc(ReadSourceLocation(Record, Idx));
- bool isArrayFiller = Record[Idx++];
+ E->setLBraceLoc(ReadSourceLocation());
+ E->setRBraceLoc(ReadSourceLocation());
+ bool isArrayFiller = Record.readInt();
Expr *filler = nullptr;
if (isArrayFiller) {
- filler = Reader.ReadSubExpr();
+ filler = Record.readSubExpr();
E->ArrayFillerOrUnionFieldInit = filler;
} else
- E->ArrayFillerOrUnionFieldInit = ReadDeclAs<FieldDecl>(Record, Idx);
- E->sawArrayRangeDesignator(Record[Idx++]);
- unsigned NumInits = Record[Idx++];
- E->reserveInits(Reader.getContext(), NumInits);
+ E->ArrayFillerOrUnionFieldInit = ReadDeclAs<FieldDecl>();
+ E->sawArrayRangeDesignator(Record.readInt());
+ unsigned NumInits = Record.readInt();
+ E->reserveInits(Record.getContext(), NumInits);
if (isArrayFiller) {
for (unsigned I = 0; I != NumInits; ++I) {
- Expr *init = Reader.ReadSubExpr();
- E->updateInit(Reader.getContext(), I, init ? init : filler);
+ Expr *init = Record.readSubExpr();
+ E->updateInit(Record.getContext(), I, init ? init : filler);
}
} else {
for (unsigned I = 0; I != NumInits; ++I)
- E->updateInit(Reader.getContext(), I, Reader.ReadSubExpr());
+ E->updateInit(Record.getContext(), I, Record.readSubExpr());
}
}
@@ -775,22 +756,20 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
typedef DesignatedInitExpr::Designator Designator;
VisitExpr(E);
- unsigned NumSubExprs = Record[Idx++];
+ unsigned NumSubExprs = Record.readInt();
assert(NumSubExprs == E->getNumSubExprs() && "Wrong number of subexprs");
for (unsigned I = 0; I != NumSubExprs; ++I)
- E->setSubExpr(I, Reader.ReadSubExpr());
- E->setEqualOrColonLoc(ReadSourceLocation(Record, Idx));
- E->setGNUSyntax(Record[Idx++]);
+ E->setSubExpr(I, Record.readSubExpr());
+ E->setEqualOrColonLoc(ReadSourceLocation());
+ E->setGNUSyntax(Record.readInt());
SmallVector<Designator, 4> Designators;
- while (Idx < Record.size()) {
- switch ((DesignatorTypes)Record[Idx++]) {
+ while (Record.getIdx() < Record.size()) {
+ switch ((DesignatorTypes)Record.readInt()) {
case DESIG_FIELD_DECL: {
- FieldDecl *Field = ReadDeclAs<FieldDecl>(Record, Idx);
- SourceLocation DotLoc
- = ReadSourceLocation(Record, Idx);
- SourceLocation FieldLoc
- = ReadSourceLocation(Record, Idx);
+ FieldDecl *Field = ReadDeclAs<FieldDecl>();
+ SourceLocation DotLoc = ReadSourceLocation();
+ SourceLocation FieldLoc = ReadSourceLocation();
Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
FieldLoc));
Designators.back().setField(Field);
@@ -798,162 +777,165 @@ void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
}
case DESIG_FIELD_NAME: {
- const IdentifierInfo *Name = Reader.GetIdentifierInfo(F, Record, Idx);
- SourceLocation DotLoc
- = ReadSourceLocation(Record, Idx);
- SourceLocation FieldLoc
- = ReadSourceLocation(Record, Idx);
+ const IdentifierInfo *Name = Record.getIdentifierInfo();
+ SourceLocation DotLoc = ReadSourceLocation();
+ SourceLocation FieldLoc = ReadSourceLocation();
Designators.push_back(Designator(Name, DotLoc, FieldLoc));
break;
}
case DESIG_ARRAY: {
- unsigned Index = Record[Idx++];
- SourceLocation LBracketLoc
- = ReadSourceLocation(Record, Idx);
- SourceLocation RBracketLoc
- = ReadSourceLocation(Record, Idx);
+ unsigned Index = Record.readInt();
+ SourceLocation LBracketLoc = ReadSourceLocation();
+ SourceLocation RBracketLoc = ReadSourceLocation();
Designators.push_back(Designator(Index, LBracketLoc, RBracketLoc));
break;
}
case DESIG_ARRAY_RANGE: {
- unsigned Index = Record[Idx++];
- SourceLocation LBracketLoc
- = ReadSourceLocation(Record, Idx);
- SourceLocation EllipsisLoc
- = ReadSourceLocation(Record, Idx);
- SourceLocation RBracketLoc
- = ReadSourceLocation(Record, Idx);
+ unsigned Index = Record.readInt();
+ SourceLocation LBracketLoc = ReadSourceLocation();
+ SourceLocation EllipsisLoc = ReadSourceLocation();
+ SourceLocation RBracketLoc = ReadSourceLocation();
Designators.push_back(Designator(Index, LBracketLoc, EllipsisLoc,
RBracketLoc));
break;
}
}
}
- E->setDesignators(Reader.getContext(),
+ E->setDesignators(Record.getContext(),
Designators.data(), Designators.size());
}
void ASTStmtReader::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
VisitExpr(E);
- E->setBase(Reader.ReadSubExpr());
- E->setUpdater(Reader.ReadSubExpr());
+ E->setBase(Record.readSubExpr());
+ E->setUpdater(Record.readSubExpr());
}
void ASTStmtReader::VisitNoInitExpr(NoInitExpr *E) {
VisitExpr(E);
}
+void ASTStmtReader::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
+ VisitExpr(E);
+ E->SubExprs[0] = Record.readSubExpr();
+ E->SubExprs[1] = Record.readSubExpr();
+}
+
+void ASTStmtReader::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ VisitExpr(E);
+}
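The two visitors above are new: ArrayInitLoopExpr models an implicit per-element array initialization loop, and ArrayInitIndexExpr stands for the current index within that loop. A minimal program that materializes both nodes (a lambda copy-capturing an array; hedged example, not taken from the patch):

int demo() {
  int arr[4] = {1, 2, 3, 4};
  // Copy-capturing an array forces element-wise copy-initialization,
  // which Clang represents with ArrayInitLoopExpr / ArrayInitIndexExpr.
  auto byCopy = [arr] { return arr[2]; };
  return byCopy();
}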
+
void ASTStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
VisitExpr(E);
}
void ASTStmtReader::VisitVAArgExpr(VAArgExpr *E) {
VisitExpr(E);
- E->setSubExpr(Reader.ReadSubExpr());
- E->setWrittenTypeInfo(GetTypeSourceInfo(Record, Idx));
- E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
- E->setIsMicrosoftABI(Record[Idx++]);
+ E->setSubExpr(Record.readSubExpr());
+ E->setWrittenTypeInfo(GetTypeSourceInfo());
+ E->setBuiltinLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
+ E->setIsMicrosoftABI(Record.readInt());
}
void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
VisitExpr(E);
- E->setAmpAmpLoc(ReadSourceLocation(Record, Idx));
- E->setLabelLoc(ReadSourceLocation(Record, Idx));
- E->setLabel(ReadDeclAs<LabelDecl>(Record, Idx));
+ E->setAmpAmpLoc(ReadSourceLocation());
+ E->setLabelLoc(ReadSourceLocation());
+ E->setLabel(ReadDeclAs<LabelDecl>());
}
void ASTStmtReader::VisitStmtExpr(StmtExpr *E) {
VisitExpr(E);
- E->setLParenLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
- E->setSubStmt(cast_or_null<CompoundStmt>(Reader.ReadSubStmt()));
+ E->setLParenLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
+ E->setSubStmt(cast_or_null<CompoundStmt>(Record.readSubStmt()));
}
void ASTStmtReader::VisitChooseExpr(ChooseExpr *E) {
VisitExpr(E);
- E->setCond(Reader.ReadSubExpr());
- E->setLHS(Reader.ReadSubExpr());
- E->setRHS(Reader.ReadSubExpr());
- E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
- E->setIsConditionTrue(Record[Idx++]);
+ E->setCond(Record.readSubExpr());
+ E->setLHS(Record.readSubExpr());
+ E->setRHS(Record.readSubExpr());
+ E->setBuiltinLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
+ E->setIsConditionTrue(Record.readInt());
}
void ASTStmtReader::VisitGNUNullExpr(GNUNullExpr *E) {
VisitExpr(E);
- E->setTokenLocation(ReadSourceLocation(Record, Idx));
+ E->setTokenLocation(ReadSourceLocation());
}
void ASTStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
VisitExpr(E);
SmallVector<Expr *, 16> Exprs;
- unsigned NumExprs = Record[Idx++];
+ unsigned NumExprs = Record.readInt();
while (NumExprs--)
- Exprs.push_back(Reader.ReadSubExpr());
- E->setExprs(Reader.getContext(), Exprs);
- E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ Exprs.push_back(Record.readSubExpr());
+ E->setExprs(Record.getContext(), Exprs);
+ E->setBuiltinLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitConvertVectorExpr(ConvertVectorExpr *E) {
VisitExpr(E);
- E->BuiltinLoc = ReadSourceLocation(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
- E->TInfo = GetTypeSourceInfo(Record, Idx);
- E->SrcExpr = Reader.ReadSubExpr();
+ E->BuiltinLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
+ E->TInfo = GetTypeSourceInfo();
+ E->SrcExpr = Record.readSubExpr();
}
void ASTStmtReader::VisitBlockExpr(BlockExpr *E) {
VisitExpr(E);
- E->setBlockDecl(ReadDeclAs<BlockDecl>(Record, Idx));
+ E->setBlockDecl(ReadDeclAs<BlockDecl>());
}
void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
VisitExpr(E);
- E->NumAssocs = Record[Idx++];
- E->AssocTypes = new (Reader.getContext()) TypeSourceInfo*[E->NumAssocs];
+ E->NumAssocs = Record.readInt();
+ E->AssocTypes = new (Record.getContext()) TypeSourceInfo*[E->NumAssocs];
E->SubExprs =
- new(Reader.getContext()) Stmt*[GenericSelectionExpr::END_EXPR+E->NumAssocs];
+ new(Record.getContext()) Stmt*[GenericSelectionExpr::END_EXPR+E->NumAssocs];
- E->SubExprs[GenericSelectionExpr::CONTROLLING] = Reader.ReadSubExpr();
+ E->SubExprs[GenericSelectionExpr::CONTROLLING] = Record.readSubExpr();
for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
- E->AssocTypes[I] = GetTypeSourceInfo(Record, Idx);
- E->SubExprs[GenericSelectionExpr::END_EXPR+I] = Reader.ReadSubExpr();
+ E->AssocTypes[I] = GetTypeSourceInfo();
+ E->SubExprs[GenericSelectionExpr::END_EXPR+I] = Record.readSubExpr();
}
- E->ResultIndex = Record[Idx++];
+ E->ResultIndex = Record.readInt();
- E->GenericLoc = ReadSourceLocation(Record, Idx);
- E->DefaultLoc = ReadSourceLocation(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->GenericLoc = ReadSourceLocation();
+ E->DefaultLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
VisitExpr(E);
- unsigned numSemanticExprs = Record[Idx++];
+ unsigned numSemanticExprs = Record.readInt();
assert(numSemanticExprs + 1 == E->PseudoObjectExprBits.NumSubExprs);
- E->PseudoObjectExprBits.ResultIndex = Record[Idx++];
+ E->PseudoObjectExprBits.ResultIndex = Record.readInt();
// Read the syntactic expression.
- E->getSubExprsBuffer()[0] = Reader.ReadSubExpr();
+ E->getSubExprsBuffer()[0] = Record.readSubExpr();
// Read all the semantic expressions.
for (unsigned i = 0; i != numSemanticExprs; ++i) {
- Expr *subExpr = Reader.ReadSubExpr();
+ Expr *subExpr = Record.readSubExpr();
E->getSubExprsBuffer()[i+1] = subExpr;
}
}
void ASTStmtReader::VisitAtomicExpr(AtomicExpr *E) {
VisitExpr(E);
- E->Op = AtomicExpr::AtomicOp(Record[Idx++]);
+ E->Op = AtomicExpr::AtomicOp(Record.readInt());
E->NumSubExprs = AtomicExpr::getNumSubExprs(E->Op);
for (unsigned I = 0; I != E->NumSubExprs; ++I)
- E->SubExprs[I] = Reader.ReadSubExpr();
- E->BuiltinLoc = ReadSourceLocation(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->SubExprs[I] = Record.readSubExpr();
+ E->BuiltinLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
}
//===----------------------------------------------------------------------===//
@@ -961,142 +943,141 @@ void ASTStmtReader::VisitAtomicExpr(AtomicExpr *E) {
void ASTStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
VisitExpr(E);
- E->setString(cast<StringLiteral>(Reader.ReadSubStmt()));
- E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setString(cast<StringLiteral>(Record.readSubStmt()));
+ E->setAtLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
VisitExpr(E);
// Could be one of several kinds: IntegerLiteral, FloatLiteral, etc.
- E->SubExpr = Reader.ReadSubStmt();
- E->BoxingMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
- E->Range = ReadSourceRange(Record, Idx);
+ E->SubExpr = Record.readSubStmt();
+ E->BoxingMethod = ReadDeclAs<ObjCMethodDecl>();
+ E->Range = ReadSourceRange();
}
void ASTStmtReader::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
VisitExpr(E);
- unsigned NumElements = Record[Idx++];
+ unsigned NumElements = Record.readInt();
assert(NumElements == E->getNumElements() && "Wrong number of elements");
Expr **Elements = E->getElements();
for (unsigned I = 0, N = NumElements; I != N; ++I)
- Elements[I] = Reader.ReadSubExpr();
- E->ArrayWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
- E->Range = ReadSourceRange(Record, Idx);
+ Elements[I] = Record.readSubExpr();
+ E->ArrayWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>();
+ E->Range = ReadSourceRange();
}
void ASTStmtReader::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
VisitExpr(E);
- unsigned NumElements = Record[Idx++];
+ unsigned NumElements = Record.readInt();
assert(NumElements == E->getNumElements() && "Wrong number of elements");
- bool HasPackExpansions = Record[Idx++];
+ bool HasPackExpansions = Record.readInt();
assert(HasPackExpansions == E->HasPackExpansions && "Pack expansion mismatch");
ObjCDictionaryLiteral::KeyValuePair *KeyValues =
E->getTrailingObjects<ObjCDictionaryLiteral::KeyValuePair>();
ObjCDictionaryLiteral::ExpansionData *Expansions =
E->getTrailingObjects<ObjCDictionaryLiteral::ExpansionData>();
for (unsigned I = 0; I != NumElements; ++I) {
- KeyValues[I].Key = Reader.ReadSubExpr();
- KeyValues[I].Value = Reader.ReadSubExpr();
+ KeyValues[I].Key = Record.readSubExpr();
+ KeyValues[I].Value = Record.readSubExpr();
if (HasPackExpansions) {
- Expansions[I].EllipsisLoc = ReadSourceLocation(Record, Idx);
- Expansions[I].NumExpansionsPlusOne = Record[Idx++];
+ Expansions[I].EllipsisLoc = ReadSourceLocation();
+ Expansions[I].NumExpansionsPlusOne = Record.readInt();
}
}
- E->DictWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
- E->Range = ReadSourceRange(Record, Idx);
+ E->DictWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>();
+ E->Range = ReadSourceRange();
}
void ASTStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
VisitExpr(E);
- E->setEncodedTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
- E->setAtLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setEncodedTypeSourceInfo(GetTypeSourceInfo());
+ E->setAtLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
VisitExpr(E);
- E->setSelector(Reader.ReadSelector(F, Record, Idx));
- E->setAtLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setSelector(Record.readSelector());
+ E->setAtLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
VisitExpr(E);
- E->setProtocol(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
- E->setAtLoc(ReadSourceLocation(Record, Idx));
- E->ProtoLoc = ReadSourceLocation(Record, Idx);
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setProtocol(ReadDeclAs<ObjCProtocolDecl>());
+ E->setAtLoc(ReadSourceLocation());
+ E->ProtoLoc = ReadSourceLocation();
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
VisitExpr(E);
- E->setDecl(ReadDeclAs<ObjCIvarDecl>(Record, Idx));
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->setOpLoc(ReadSourceLocation(Record, Idx));
- E->setBase(Reader.ReadSubExpr());
- E->setIsArrow(Record[Idx++]);
- E->setIsFreeIvar(Record[Idx++]);
+ E->setDecl(ReadDeclAs<ObjCIvarDecl>());
+ E->setLocation(ReadSourceLocation());
+ E->setOpLoc(ReadSourceLocation());
+ E->setBase(Record.readSubExpr());
+ E->setIsArrow(Record.readInt());
+ E->setIsFreeIvar(Record.readInt());
}
void ASTStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
VisitExpr(E);
- unsigned MethodRefFlags = Record[Idx++];
- bool Implicit = Record[Idx++] != 0;
+ unsigned MethodRefFlags = Record.readInt();
+ bool Implicit = Record.readInt() != 0;
if (Implicit) {
- ObjCMethodDecl *Getter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
- ObjCMethodDecl *Setter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ ObjCMethodDecl *Getter = ReadDeclAs<ObjCMethodDecl>();
+ ObjCMethodDecl *Setter = ReadDeclAs<ObjCMethodDecl>();
E->setImplicitProperty(Getter, Setter, MethodRefFlags);
} else {
- E->setExplicitProperty(ReadDeclAs<ObjCPropertyDecl>(Record, Idx),
- MethodRefFlags);
+ E->setExplicitProperty(ReadDeclAs<ObjCPropertyDecl>(), MethodRefFlags);
}
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->setReceiverLocation(ReadSourceLocation(Record, Idx));
- switch (Record[Idx++]) {
+ E->setLocation(ReadSourceLocation());
+ E->setReceiverLocation(ReadSourceLocation());
+ switch (Record.readInt()) {
case 0:
- E->setBase(Reader.ReadSubExpr());
+ E->setBase(Record.readSubExpr());
break;
case 1:
- E->setSuperReceiver(Reader.readType(F, Record, Idx));
+ E->setSuperReceiver(Record.readType());
break;
case 2:
- E->setClassReceiver(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ E->setClassReceiver(ReadDeclAs<ObjCInterfaceDecl>());
break;
}
}
void ASTStmtReader::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
VisitExpr(E);
- E->setRBracket(ReadSourceLocation(Record, Idx));
- E->setBaseExpr(Reader.ReadSubExpr());
- E->setKeyExpr(Reader.ReadSubExpr());
- E->GetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
- E->SetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->setRBracket(ReadSourceLocation());
+ E->setBaseExpr(Record.readSubExpr());
+ E->setKeyExpr(Record.readSubExpr());
+ E->GetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>();
+ E->SetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>();
}
void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
VisitExpr(E);
- assert(Record[Idx] == E->getNumArgs());
- ++Idx;
- unsigned NumStoredSelLocs = Record[Idx++];
- E->SelLocsKind = Record[Idx++];
- E->setDelegateInitCall(Record[Idx++]);
- E->IsImplicit = Record[Idx++];
+ assert(Record.peekInt() == E->getNumArgs());
+ Record.skipInts(1);
+ unsigned NumStoredSelLocs = Record.readInt();
+ E->SelLocsKind = Record.readInt();
+ E->setDelegateInitCall(Record.readInt());
+ E->IsImplicit = Record.readInt();
ObjCMessageExpr::ReceiverKind Kind
- = static_cast<ObjCMessageExpr::ReceiverKind>(Record[Idx++]);
+ = static_cast<ObjCMessageExpr::ReceiverKind>(Record.readInt());
switch (Kind) {
case ObjCMessageExpr::Instance:
- E->setInstanceReceiver(Reader.ReadSubExpr());
+ E->setInstanceReceiver(Record.readSubExpr());
break;
case ObjCMessageExpr::Class:
- E->setClassReceiver(GetTypeSourceInfo(Record, Idx));
+ E->setClassReceiver(GetTypeSourceInfo());
break;
case ObjCMessageExpr::SuperClass:
case ObjCMessageExpr::SuperInstance: {
- QualType T = Reader.readType(F, Record, Idx);
- SourceLocation SuperLoc = ReadSourceLocation(Record, Idx);
+ QualType T = Record.readType();
+ SourceLocation SuperLoc = ReadSourceLocation();
E->setSuper(SuperLoc, T, Kind == ObjCMessageExpr::SuperInstance);
break;
}
@@ -1104,90 +1085,90 @@ void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
assert(Kind == E->getReceiverKind());
- if (Record[Idx++])
- E->setMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+ if (Record.readInt())
+ E->setMethodDecl(ReadDeclAs<ObjCMethodDecl>());
else
- E->setSelector(Reader.ReadSelector(F, Record, Idx));
+ E->setSelector(Record.readSelector());
- E->LBracLoc = ReadSourceLocation(Record, Idx);
- E->RBracLoc = ReadSourceLocation(Record, Idx);
+ E->LBracLoc = ReadSourceLocation();
+ E->RBracLoc = ReadSourceLocation();
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- E->setArg(I, Reader.ReadSubExpr());
+ E->setArg(I, Record.readSubExpr());
SourceLocation *Locs = E->getStoredSelLocs();
for (unsigned I = 0; I != NumStoredSelLocs; ++I)
- Locs[I] = ReadSourceLocation(Record, Idx);
+ Locs[I] = ReadSourceLocation();
}
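The assert idiom introduced above (and reused by the Objective-C @try and C++ try readers below) is worth spelling out in terms of the cursor semantics:

  // peekInt() does not advance the cursor, so the skipInts(1) is required
  // even in release builds where the assert compiles away.
  assert(Record.peekInt() == E->getNumArgs());
  Record.skipInts(1); // consume the count that was only peeked at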
void ASTStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
VisitStmt(S);
- S->setElement(Reader.ReadSubStmt());
- S->setCollection(Reader.ReadSubExpr());
- S->setBody(Reader.ReadSubStmt());
- S->setForLoc(ReadSourceLocation(Record, Idx));
- S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->setElement(Record.readSubStmt());
+ S->setCollection(Record.readSubExpr());
+ S->setBody(Record.readSubStmt());
+ S->setForLoc(ReadSourceLocation());
+ S->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
VisitStmt(S);
- S->setCatchBody(Reader.ReadSubStmt());
- S->setCatchParamDecl(ReadDeclAs<VarDecl>(Record, Idx));
- S->setAtCatchLoc(ReadSourceLocation(Record, Idx));
- S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->setCatchBody(Record.readSubStmt());
+ S->setCatchParamDecl(ReadDeclAs<VarDecl>());
+ S->setAtCatchLoc(ReadSourceLocation());
+ S->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
VisitStmt(S);
- S->setFinallyBody(Reader.ReadSubStmt());
- S->setAtFinallyLoc(ReadSourceLocation(Record, Idx));
+ S->setFinallyBody(Record.readSubStmt());
+ S->setAtFinallyLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
VisitStmt(S);
- S->setSubStmt(Reader.ReadSubStmt());
- S->setAtLoc(ReadSourceLocation(Record, Idx));
+ S->setSubStmt(Record.readSubStmt());
+ S->setAtLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
VisitStmt(S);
- assert(Record[Idx] == S->getNumCatchStmts());
- ++Idx;
- bool HasFinally = Record[Idx++];
- S->setTryBody(Reader.ReadSubStmt());
+ assert(Record.peekInt() == S->getNumCatchStmts());
+ Record.skipInts(1);
+ bool HasFinally = Record.readInt();
+ S->setTryBody(Record.readSubStmt());
for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
- S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(Reader.ReadSubStmt()));
+ S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(Record.readSubStmt()));
if (HasFinally)
- S->setFinallyStmt(Reader.ReadSubStmt());
- S->setAtTryLoc(ReadSourceLocation(Record, Idx));
+ S->setFinallyStmt(Record.readSubStmt());
+ S->setAtTryLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
VisitStmt(S);
- S->setSynchExpr(Reader.ReadSubStmt());
- S->setSynchBody(Reader.ReadSubStmt());
- S->setAtSynchronizedLoc(ReadSourceLocation(Record, Idx));
+ S->setSynchExpr(Record.readSubStmt());
+ S->setSynchBody(Record.readSubStmt());
+ S->setAtSynchronizedLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
VisitStmt(S);
- S->setThrowExpr(Reader.ReadSubStmt());
- S->setThrowLoc(ReadSourceLocation(Record, Idx));
+ S->setThrowExpr(Record.readSubStmt());
+ S->setThrowLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
VisitExpr(E);
- E->setValue(Record[Idx++]);
- E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setValue(Record.readInt());
+ E->setLocation(ReadSourceLocation());
}
void ASTStmtReader::VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
VisitExpr(E);
- SourceRange R = Reader.ReadSourceRange(F, Record, Idx);
+ SourceRange R = Record.readSourceRange();
E->AtLoc = R.getBegin();
E->RParen = R.getEnd();
- E->VersionToCheck = Reader.ReadVersionTuple(Record, Idx);
+ E->VersionToCheck = Record.readVersionTuple();
}
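Note: ObjCAvailabilityCheckExpr is the node behind an Objective-C "@available(...)" condition; the VersionToCheck restored here via readVersionTuple() is the platform version the check compares against.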
//===----------------------------------------------------------------------===//
@@ -1196,125 +1177,113 @@ void ASTStmtReader::VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E)
void ASTStmtReader::VisitCXXCatchStmt(CXXCatchStmt *S) {
VisitStmt(S);
- S->CatchLoc = ReadSourceLocation(Record, Idx);
- S->ExceptionDecl = ReadDeclAs<VarDecl>(Record, Idx);
- S->HandlerBlock = Reader.ReadSubStmt();
+ S->CatchLoc = ReadSourceLocation();
+ S->ExceptionDecl = ReadDeclAs<VarDecl>();
+ S->HandlerBlock = Record.readSubStmt();
}
void ASTStmtReader::VisitCXXTryStmt(CXXTryStmt *S) {
VisitStmt(S);
- assert(Record[Idx] == S->getNumHandlers() && "NumStmtFields is wrong ?");
- ++Idx;
- S->TryLoc = ReadSourceLocation(Record, Idx);
- S->getStmts()[0] = Reader.ReadSubStmt();
+ assert(Record.peekInt() == S->getNumHandlers() && "NumStmtFields is wrong?");
+ Record.skipInts(1);
+ S->TryLoc = ReadSourceLocation();
+ S->getStmts()[0] = Record.readSubStmt();
for (unsigned i = 0, e = S->getNumHandlers(); i != e; ++i)
- S->getStmts()[i + 1] = Reader.ReadSubStmt();
+ S->getStmts()[i + 1] = Record.readSubStmt();
}
void ASTStmtReader::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
VisitStmt(S);
- S->ForLoc = ReadSourceLocation(Record, Idx);
- S->CoawaitLoc = ReadSourceLocation(Record, Idx);
- S->ColonLoc = ReadSourceLocation(Record, Idx);
- S->RParenLoc = ReadSourceLocation(Record, Idx);
- S->setRangeStmt(Reader.ReadSubStmt());
- S->setBeginStmt(Reader.ReadSubStmt());
- S->setEndStmt(Reader.ReadSubStmt());
- S->setCond(Reader.ReadSubExpr());
- S->setInc(Reader.ReadSubExpr());
- S->setLoopVarStmt(Reader.ReadSubStmt());
- S->setBody(Reader.ReadSubStmt());
+ S->ForLoc = ReadSourceLocation();
+ S->CoawaitLoc = ReadSourceLocation();
+ S->ColonLoc = ReadSourceLocation();
+ S->RParenLoc = ReadSourceLocation();
+ S->setRangeStmt(Record.readSubStmt());
+ S->setBeginStmt(Record.readSubStmt());
+ S->setEndStmt(Record.readSubStmt());
+ S->setCond(Record.readSubExpr());
+ S->setInc(Record.readSubExpr());
+ S->setLoopVarStmt(Record.readSubStmt());
+ S->setBody(Record.readSubStmt());
}
void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
VisitStmt(S);
- S->KeywordLoc = ReadSourceLocation(Record, Idx);
- S->IsIfExists = Record[Idx++];
- S->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- ReadDeclarationNameInfo(S->NameInfo, Record, Idx);
- S->SubStmt = Reader.ReadSubStmt();
+ S->KeywordLoc = ReadSourceLocation();
+ S->IsIfExists = Record.readInt();
+ S->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ ReadDeclarationNameInfo(S->NameInfo);
+ S->SubStmt = Record.readSubStmt();
}
void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
VisitCallExpr(E);
- E->Operator = (OverloadedOperatorKind)Record[Idx++];
- E->Range = Reader.ReadSourceRange(F, Record, Idx);
- E->setFPContractable((bool)Record[Idx++]);
+ E->Operator = (OverloadedOperatorKind)Record.readInt();
+ E->Range = Record.readSourceRange();
+ E->setFPContractable((bool)Record.readInt());
}
void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
VisitExpr(E);
- E->NumArgs = Record[Idx++];
+ E->NumArgs = Record.readInt();
if (E->NumArgs)
- E->Args = new (Reader.getContext()) Stmt*[E->NumArgs];
+ E->Args = new (Record.getContext()) Stmt*[E->NumArgs];
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- E->setArg(I, Reader.ReadSubExpr());
- E->setConstructor(ReadDeclAs<CXXConstructorDecl>(Record, Idx));
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->setElidable(Record[Idx++]);
- E->setHadMultipleCandidates(Record[Idx++]);
- E->setListInitialization(Record[Idx++]);
- E->setStdInitListInitialization(Record[Idx++]);
- E->setRequiresZeroInitialization(Record[Idx++]);
- E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record[Idx++]);
- E->ParenOrBraceRange = ReadSourceRange(Record, Idx);
+ E->setArg(I, Record.readSubExpr());
+ E->setConstructor(ReadDeclAs<CXXConstructorDecl>());
+ E->setLocation(ReadSourceLocation());
+ E->setElidable(Record.readInt());
+ E->setHadMultipleCandidates(Record.readInt());
+ E->setListInitialization(Record.readInt());
+ E->setStdInitListInitialization(Record.readInt());
+ E->setRequiresZeroInitialization(Record.readInt());
+ E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record.readInt());
+ E->ParenOrBraceRange = ReadSourceRange();
}
void ASTStmtReader::VisitCXXInheritedCtorInitExpr(CXXInheritedCtorInitExpr *E) {
VisitExpr(E);
- E->Constructor = ReadDeclAs<CXXConstructorDecl>(Record, Idx);
- E->Loc = ReadSourceLocation(Record, Idx);
- E->ConstructsVirtualBase = Record[Idx++];
- E->InheritedFromVirtualBase = Record[Idx++];
+ E->Constructor = ReadDeclAs<CXXConstructorDecl>();
+ E->Loc = ReadSourceLocation();
+ E->ConstructsVirtualBase = Record.readInt();
+ E->InheritedFromVirtualBase = Record.readInt();
}
void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
VisitCXXConstructExpr(E);
- E->Type = GetTypeSourceInfo(Record, Idx);
+ E->Type = GetTypeSourceInfo();
}
void ASTStmtReader::VisitLambdaExpr(LambdaExpr *E) {
VisitExpr(E);
- unsigned NumCaptures = Record[Idx++];
+ unsigned NumCaptures = Record.readInt();
assert(NumCaptures == E->NumCaptures); (void)NumCaptures;
- unsigned NumArrayIndexVars = Record[Idx++];
- E->IntroducerRange = ReadSourceRange(Record, Idx);
- E->CaptureDefault = static_cast<LambdaCaptureDefault>(Record[Idx++]);
- E->CaptureDefaultLoc = ReadSourceLocation(Record, Idx);
- E->ExplicitParams = Record[Idx++];
- E->ExplicitResultType = Record[Idx++];
- E->ClosingBrace = ReadSourceLocation(Record, Idx);
-
+ E->IntroducerRange = ReadSourceRange();
+ E->CaptureDefault = static_cast<LambdaCaptureDefault>(Record.readInt());
+ E->CaptureDefaultLoc = ReadSourceLocation();
+ E->ExplicitParams = Record.readInt();
+ E->ExplicitResultType = Record.readInt();
+ E->ClosingBrace = ReadSourceLocation();
+
// Read capture initializers.
for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
CEnd = E->capture_init_end();
C != CEnd; ++C)
- *C = Reader.ReadSubExpr();
-
- // Read array capture index variables.
- if (NumArrayIndexVars > 0) {
- unsigned *ArrayIndexStarts = E->getArrayIndexStarts();
- for (unsigned I = 0; I != NumCaptures + 1; ++I)
- ArrayIndexStarts[I] = Record[Idx++];
-
- VarDecl **ArrayIndexVars = E->getArrayIndexVars();
- for (unsigned I = 0; I != NumArrayIndexVars; ++I)
- ArrayIndexVars[I] = ReadDeclAs<VarDecl>(Record, Idx);
- }
+ *C = Record.readSubExpr();
}
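The block deleted above is the flip side of the new ArrayInitLoopExpr support earlier in this file: by-copy array captures are now modeled as an implicit initialization loop, so LambdaExpr no longer stores per-capture array index variables and the reader only restores the capture initializers.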
void
ASTStmtReader::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
VisitExpr(E);
- E->SubExpr = Reader.ReadSubExpr();
+ E->SubExpr = Record.readSubExpr();
}
void ASTStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
VisitExplicitCastExpr(E);
- SourceRange R = ReadSourceRange(Record, Idx);
+ SourceRange R = ReadSourceRange();
E->Loc = R.getBegin();
E->RParenLoc = R.getEnd();
- R = ReadSourceRange(Record, Idx);
+ R = ReadSourceRange();
E->AngleBrackets = R;
}
@@ -1336,342 +1305,345 @@ void ASTStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
void ASTStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
VisitExplicitCastExpr(E);
- E->setLParenLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setLParenLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
VisitCallExpr(E);
- E->UDSuffixLoc = ReadSourceLocation(Record, Idx);
+ E->UDSuffixLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
VisitExpr(E);
- E->setValue(Record[Idx++]);
- E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setValue(Record.readInt());
+ E->setLocation(ReadSourceLocation());
}
void ASTStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
VisitExpr(E);
- E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setLocation(ReadSourceLocation());
}
void ASTStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
VisitExpr(E);
- E->setSourceRange(ReadSourceRange(Record, Idx));
+ E->setSourceRange(ReadSourceRange());
if (E->isTypeOperand()) { // typeid(int)
E->setTypeOperandSourceInfo(
- GetTypeSourceInfo(Record, Idx));
+ GetTypeSourceInfo());
return;
}
-
+
// typeid(42+2)
- E->setExprOperand(Reader.ReadSubExpr());
+ E->setExprOperand(Record.readSubExpr());
}
void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
VisitExpr(E);
- E->setLocation(ReadSourceLocation(Record, Idx));
- E->setImplicit(Record[Idx++]);
+ E->setLocation(ReadSourceLocation());
+ E->setImplicit(Record.readInt());
}
void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
VisitExpr(E);
- E->ThrowLoc = ReadSourceLocation(Record, Idx);
- E->Op = Reader.ReadSubExpr();
- E->IsThrownVariableInScope = Record[Idx++];
+ E->ThrowLoc = ReadSourceLocation();
+ E->Op = Record.readSubExpr();
+ E->IsThrownVariableInScope = Record.readInt();
}
void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
VisitExpr(E);
- E->Param = ReadDeclAs<ParmVarDecl>(Record, Idx);
- E->Loc = ReadSourceLocation(Record, Idx);
+ E->Param = ReadDeclAs<ParmVarDecl>();
+ E->Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
VisitExpr(E);
- E->Field = ReadDeclAs<FieldDecl>(Record, Idx);
- E->Loc = ReadSourceLocation(Record, Idx);
+ E->Field = ReadDeclAs<FieldDecl>();
+ E->Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
VisitExpr(E);
- E->setTemporary(Reader.ReadCXXTemporary(F, Record, Idx));
- E->setSubExpr(Reader.ReadSubExpr());
+ E->setTemporary(Record.readCXXTemporary());
+ E->setSubExpr(Record.readSubExpr());
}
void ASTStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
VisitExpr(E);
- E->TypeInfo = GetTypeSourceInfo(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->TypeInfo = GetTypeSourceInfo();
+ E->RParenLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
- E->GlobalNew = Record[Idx++];
- bool isArray = Record[Idx++];
- E->UsualArrayDeleteWantsSize = Record[Idx++];
- unsigned NumPlacementArgs = Record[Idx++];
- E->StoredInitializationStyle = Record[Idx++];
- E->setOperatorNew(ReadDeclAs<FunctionDecl>(Record, Idx));
- E->setOperatorDelete(ReadDeclAs<FunctionDecl>(Record, Idx));
- E->AllocatedTypeInfo = GetTypeSourceInfo(Record, Idx);
- E->TypeIdParens = ReadSourceRange(Record, Idx);
- E->Range = ReadSourceRange(Record, Idx);
- E->DirectInitRange = ReadSourceRange(Record, Idx);
-
- E->AllocateArgsArray(Reader.getContext(), isArray, NumPlacementArgs,
+ E->GlobalNew = Record.readInt();
+ bool isArray = Record.readInt();
+ E->PassAlignment = Record.readInt();
+ E->UsualArrayDeleteWantsSize = Record.readInt();
+ unsigned NumPlacementArgs = Record.readInt();
+ E->StoredInitializationStyle = Record.readInt();
+ E->setOperatorNew(ReadDeclAs<FunctionDecl>());
+ E->setOperatorDelete(ReadDeclAs<FunctionDecl>());
+ E->AllocatedTypeInfo = GetTypeSourceInfo();
+ E->TypeIdParens = ReadSourceRange();
+ E->Range = ReadSourceRange();
+ E->DirectInitRange = ReadSourceRange();
+
+ E->AllocateArgsArray(Record.getContext(), isArray, NumPlacementArgs,
E->StoredInitializationStyle != 0);
// Install all the subexpressions.
for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),e = E->raw_arg_end();
I != e; ++I)
- *I = Reader.ReadSubStmt();
+ *I = Record.readSubStmt();
}
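PassAlignment is a new bit here: it records whether the chosen allocation function takes the required alignment (C++17 aligned allocation). A small example that would set it — hedged, assuming a C++17 target with aligned allocation enabled:

struct alignas(64) Overaligned { char buf[64]; };

Overaligned *make() {
  // Over-aligned type: overload resolution selects
  // ::operator new(std::size_t, std::align_val_t), and CXXNewExpr's
  // PassAlignment flag records that choice for serialization.
  return new Overaligned;
}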
void ASTStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
VisitExpr(E);
- E->GlobalDelete = Record[Idx++];
- E->ArrayForm = Record[Idx++];
- E->ArrayFormAsWritten = Record[Idx++];
- E->UsualArrayDeleteWantsSize = Record[Idx++];
- E->OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx);
- E->Argument = Reader.ReadSubExpr();
- E->Loc = ReadSourceLocation(Record, Idx);
+ E->GlobalDelete = Record.readInt();
+ E->ArrayForm = Record.readInt();
+ E->ArrayFormAsWritten = Record.readInt();
+ E->UsualArrayDeleteWantsSize = Record.readInt();
+ E->OperatorDelete = ReadDeclAs<FunctionDecl>();
+ E->Argument = Record.readSubExpr();
+ E->Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
VisitExpr(E);
- E->Base = Reader.ReadSubExpr();
- E->IsArrow = Record[Idx++];
- E->OperatorLoc = ReadSourceLocation(Record, Idx);
- E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- E->ScopeType = GetTypeSourceInfo(Record, Idx);
- E->ColonColonLoc = ReadSourceLocation(Record, Idx);
- E->TildeLoc = ReadSourceLocation(Record, Idx);
-
- IdentifierInfo *II = Reader.GetIdentifierInfo(F, Record, Idx);
+ E->Base = Record.readSubExpr();
+ E->IsArrow = Record.readInt();
+ E->OperatorLoc = ReadSourceLocation();
+ E->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ E->ScopeType = GetTypeSourceInfo();
+ E->ColonColonLoc = ReadSourceLocation();
+ E->TildeLoc = ReadSourceLocation();
+
+ IdentifierInfo *II = Record.getIdentifierInfo();
if (II)
- E->setDestroyedType(II, ReadSourceLocation(Record, Idx));
+ E->setDestroyedType(II, ReadSourceLocation());
else
- E->setDestroyedType(GetTypeSourceInfo(Record, Idx));
+ E->setDestroyedType(GetTypeSourceInfo());
}
void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
VisitExpr(E);
- unsigned NumObjects = Record[Idx++];
+ unsigned NumObjects = Record.readInt();
assert(NumObjects == E->getNumObjects());
for (unsigned i = 0; i != NumObjects; ++i)
E->getTrailingObjects<BlockDecl *>()[i] =
- ReadDeclAs<BlockDecl>(Record, Idx);
+ ReadDeclAs<BlockDecl>();
- E->ExprWithCleanupsBits.CleanupsHaveSideEffects = Record[Idx++];
- E->SubExpr = Reader.ReadSubExpr();
+ E->ExprWithCleanupsBits.CleanupsHaveSideEffects = Record.readInt();
+ E->SubExpr = Record.readSubExpr();
}
void
ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
VisitExpr(E);
- if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ if (Record.readInt()) // HasTemplateKWAndArgsInfo
ReadTemplateKWAndArgsInfo(
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
E->getTrailingObjects<TemplateArgumentLoc>(),
- /*NumTemplateArgs=*/Record[Idx++]);
+ /*NumTemplateArgs=*/Record.readInt());
- E->Base = Reader.ReadSubExpr();
- E->BaseType = Reader.readType(F, Record, Idx);
- E->IsArrow = Record[Idx++];
- E->OperatorLoc = ReadSourceLocation(Record, Idx);
- E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- E->FirstQualifierFoundInScope = ReadDeclAs<NamedDecl>(Record, Idx);
- ReadDeclarationNameInfo(E->MemberNameInfo, Record, Idx);
+ E->Base = Record.readSubExpr();
+ E->BaseType = Record.readType();
+ E->IsArrow = Record.readInt();
+ E->OperatorLoc = ReadSourceLocation();
+ E->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ E->FirstQualifierFoundInScope = ReadDeclAs<NamedDecl>();
+ ReadDeclarationNameInfo(E->MemberNameInfo);
}
void
ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
VisitExpr(E);
- if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ if (Record.readInt()) // HasTemplateKWAndArgsInfo
ReadTemplateKWAndArgsInfo(
*E->getTrailingObjects<ASTTemplateKWAndArgsInfo>(),
E->getTrailingObjects<TemplateArgumentLoc>(),
- /*NumTemplateArgs=*/Record[Idx++]);
+ /*NumTemplateArgs=*/Record.readInt());
- E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
+ E->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ ReadDeclarationNameInfo(E->NameInfo);
}
void
ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
VisitExpr(E);
- assert(Record[Idx] == E->arg_size() && "Read wrong record during creation ?");
- ++Idx; // NumArgs;
+ assert(Record.peekInt() == E->arg_size() &&
+ "Read wrong record during creation ?");
+ Record.skipInts(1);
for (unsigned I = 0, N = E->arg_size(); I != N; ++I)
- E->setArg(I, Reader.ReadSubExpr());
- E->Type = GetTypeSourceInfo(Record, Idx);
- E->setLParenLoc(ReadSourceLocation(Record, Idx));
- E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setArg(I, Record.readSubExpr());
+ E->Type = GetTypeSourceInfo();
+ E->setLParenLoc(ReadSourceLocation());
+ E->setRParenLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
VisitExpr(E);
- if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ if (Record.readInt()) // HasTemplateKWAndArgsInfo
ReadTemplateKWAndArgsInfo(*E->getTrailingASTTemplateKWAndArgsInfo(),
E->getTrailingTemplateArgumentLoc(),
- /*NumTemplateArgs=*/Record[Idx++]);
+ /*NumTemplateArgs=*/Record.readInt());
- unsigned NumDecls = Record[Idx++];
+ unsigned NumDecls = Record.readInt();
UnresolvedSet<8> Decls;
for (unsigned i = 0; i != NumDecls; ++i) {
- NamedDecl *D = ReadDeclAs<NamedDecl>(Record, Idx);
- AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ NamedDecl *D = ReadDeclAs<NamedDecl>();
+ AccessSpecifier AS = (AccessSpecifier)Record.readInt();
Decls.addDecl(D, AS);
}
- E->initializeResults(Reader.getContext(), Decls.begin(), Decls.end());
+ E->initializeResults(Record.getContext(), Decls.begin(), Decls.end());
- ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
- E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameInfo(E->NameInfo);
+ E->QualifierLoc = Record.readNestedNameSpecifierLoc();
}
void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
VisitOverloadExpr(E);
- E->IsArrow = Record[Idx++];
- E->HasUnresolvedUsing = Record[Idx++];
- E->Base = Reader.ReadSubExpr();
- E->BaseType = Reader.readType(F, Record, Idx);
- E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->IsArrow = Record.readInt();
+ E->HasUnresolvedUsing = Record.readInt();
+ E->Base = Record.readSubExpr();
+ E->BaseType = Record.readType();
+ E->OperatorLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
VisitOverloadExpr(E);
- E->RequiresADL = Record[Idx++];
- E->Overloaded = Record[Idx++];
- E->NamingClass = ReadDeclAs<CXXRecordDecl>(Record, Idx);
+ E->RequiresADL = Record.readInt();
+ E->Overloaded = Record.readInt();
+ E->NamingClass = ReadDeclAs<CXXRecordDecl>();
}
void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
VisitExpr(E);
- E->TypeTraitExprBits.NumArgs = Record[Idx++];
- E->TypeTraitExprBits.Kind = Record[Idx++];
- E->TypeTraitExprBits.Value = Record[Idx++];
- SourceRange Range = ReadSourceRange(Record, Idx);
+ E->TypeTraitExprBits.NumArgs = Record.readInt();
+ E->TypeTraitExprBits.Kind = Record.readInt();
+ E->TypeTraitExprBits.Value = Record.readInt();
+ SourceRange Range = ReadSourceRange();
E->Loc = Range.getBegin();
E->RParenLoc = Range.getEnd();
TypeSourceInfo **Args = E->getTrailingObjects<TypeSourceInfo *>();
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
- Args[I] = GetTypeSourceInfo(Record, Idx);
+ Args[I] = GetTypeSourceInfo();
}
void ASTStmtReader::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
VisitExpr(E);
- E->ATT = (ArrayTypeTrait)Record[Idx++];
- E->Value = (unsigned int)Record[Idx++];
- SourceRange Range = ReadSourceRange(Record, Idx);
+ E->ATT = (ArrayTypeTrait)Record.readInt();
+ E->Value = (unsigned int)Record.readInt();
+ SourceRange Range = ReadSourceRange();
E->Loc = Range.getBegin();
E->RParen = Range.getEnd();
- E->QueriedType = GetTypeSourceInfo(Record, Idx);
+ E->QueriedType = GetTypeSourceInfo();
+ E->Dimension = Record.readSubExpr();
}
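The added Dimension read restores the index operand of the Embarcadero trait __array_extent(T, I); judging from this hunk, that subexpression was previously dropped on deserialization.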
void ASTStmtReader::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
VisitExpr(E);
- E->ET = (ExpressionTrait)Record[Idx++];
- E->Value = (bool)Record[Idx++];
- SourceRange Range = ReadSourceRange(Record, Idx);
- E->QueriedExpression = Reader.ReadSubExpr();
+ E->ET = (ExpressionTrait)Record.readInt();
+ E->Value = (bool)Record.readInt();
+ SourceRange Range = ReadSourceRange();
+ E->QueriedExpression = Record.readSubExpr();
E->Loc = Range.getBegin();
E->RParen = Range.getEnd();
}
void ASTStmtReader::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
VisitExpr(E);
- E->Value = (bool)Record[Idx++];
- E->Range = ReadSourceRange(Record, Idx);
- E->Operand = Reader.ReadSubExpr();
+ E->Value = (bool)Record.readInt();
+ E->Range = ReadSourceRange();
+ E->Operand = Record.readSubExpr();
}
void ASTStmtReader::VisitPackExpansionExpr(PackExpansionExpr *E) {
VisitExpr(E);
- E->EllipsisLoc = ReadSourceLocation(Record, Idx);
- E->NumExpansions = Record[Idx++];
- E->Pattern = Reader.ReadSubExpr();
+ E->EllipsisLoc = ReadSourceLocation();
+ E->NumExpansions = Record.readInt();
+ E->Pattern = Record.readSubExpr();
}
void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
VisitExpr(E);
- unsigned NumPartialArgs = Record[Idx++];
- E->OperatorLoc = ReadSourceLocation(Record, Idx);
- E->PackLoc = ReadSourceLocation(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
- E->Pack = Reader.ReadDeclAs<NamedDecl>(F, Record, Idx);
+ unsigned NumPartialArgs = Record.readInt();
+ E->OperatorLoc = ReadSourceLocation();
+ E->PackLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
+ E->Pack = Record.readDeclAs<NamedDecl>();
if (E->isPartiallySubstituted()) {
assert(E->Length == NumPartialArgs);
for (auto *I = E->getTrailingObjects<TemplateArgument>(),
*E = I + NumPartialArgs;
I != E; ++I)
- new (I) TemplateArgument(Reader.ReadTemplateArgument(F, Record, Idx));
+ new (I) TemplateArgument(Record.readTemplateArgument());
} else if (!E->isValueDependent()) {
- E->Length = Record[Idx++];
+ E->Length = Record.readInt();
}
}
void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
SubstNonTypeTemplateParmExpr *E) {
VisitExpr(E);
- E->Param = ReadDeclAs<NonTypeTemplateParmDecl>(Record, Idx);
- E->NameLoc = ReadSourceLocation(Record, Idx);
- E->Replacement = Reader.ReadSubExpr();
+ E->Param = ReadDeclAs<NonTypeTemplateParmDecl>();
+ E->NameLoc = ReadSourceLocation();
+ E->Replacement = Record.readSubExpr();
}
void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
SubstNonTypeTemplateParmPackExpr *E) {
VisitExpr(E);
- E->Param = ReadDeclAs<NonTypeTemplateParmDecl>(Record, Idx);
- TemplateArgument ArgPack = Reader.ReadTemplateArgument(F, Record, Idx);
+ E->Param = ReadDeclAs<NonTypeTemplateParmDecl>();
+ TemplateArgument ArgPack = Record.readTemplateArgument();
if (ArgPack.getKind() != TemplateArgument::Pack)
return;
-
+
E->Arguments = ArgPack.pack_begin();
E->NumArguments = ArgPack.pack_size();
- E->NameLoc = ReadSourceLocation(Record, Idx);
+ E->NameLoc = ReadSourceLocation();
}
void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
VisitExpr(E);
- E->NumParameters = Record[Idx++];
- E->ParamPack = ReadDeclAs<ParmVarDecl>(Record, Idx);
- E->NameLoc = ReadSourceLocation(Record, Idx);
+ E->NumParameters = Record.readInt();
+ E->ParamPack = ReadDeclAs<ParmVarDecl>();
+ E->NameLoc = ReadSourceLocation();
ParmVarDecl **Parms = E->getTrailingObjects<ParmVarDecl *>();
for (unsigned i = 0, n = E->NumParameters; i != n; ++i)
- Parms[i] = ReadDeclAs<ParmVarDecl>(Record, Idx);
+ Parms[i] = ReadDeclAs<ParmVarDecl>();
}
void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
VisitExpr(E);
- E->State = Reader.ReadSubExpr();
- auto VD = ReadDeclAs<ValueDecl>(Record, Idx);
- unsigned ManglingNumber = Record[Idx++];
+ E->State = Record.readSubExpr();
+ auto VD = ReadDeclAs<ValueDecl>();
+ unsigned ManglingNumber = Record.readInt();
E->setExtendingDecl(VD, ManglingNumber);
}
void ASTStmtReader::VisitCXXFoldExpr(CXXFoldExpr *E) {
VisitExpr(E);
- E->LParenLoc = ReadSourceLocation(Record, Idx);
- E->EllipsisLoc = ReadSourceLocation(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
- E->SubExprs[0] = Reader.ReadSubExpr();
- E->SubExprs[1] = Reader.ReadSubExpr();
- E->Opcode = (BinaryOperatorKind)Record[Idx++];
+ E->LParenLoc = ReadSourceLocation();
+ E->EllipsisLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
+ E->SubExprs[0] = Record.readSubExpr();
+ E->SubExprs[1] = Record.readSubExpr();
+ E->Opcode = (BinaryOperatorKind)Record.readInt();
}
void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
VisitExpr(E);
- E->SourceExpr = Reader.ReadSubExpr();
- E->Loc = ReadSourceLocation(Record, Idx);
+ E->SourceExpr = Record.readSubExpr();
+ E->Loc = ReadSourceLocation();
}
void ASTStmtReader::VisitTypoExpr(TypoExpr *E) {
@@ -1683,59 +1655,59 @@ void ASTStmtReader::VisitTypoExpr(TypoExpr *E) {
//===----------------------------------------------------------------------===//
void ASTStmtReader::VisitMSPropertyRefExpr(MSPropertyRefExpr *E) {
VisitExpr(E);
- E->IsArrow = (Record[Idx++] != 0);
- E->BaseExpr = Reader.ReadSubExpr();
- E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
- E->MemberLoc = ReadSourceLocation(Record, Idx);
- E->TheDecl = ReadDeclAs<MSPropertyDecl>(Record, Idx);
+ E->IsArrow = (Record.readInt() != 0);
+ E->BaseExpr = Record.readSubExpr();
+ E->QualifierLoc = Record.readNestedNameSpecifierLoc();
+ E->MemberLoc = ReadSourceLocation();
+ E->TheDecl = ReadDeclAs<MSPropertyDecl>();
}
void ASTStmtReader::VisitMSPropertySubscriptExpr(MSPropertySubscriptExpr *E) {
VisitExpr(E);
- E->setBase(Reader.ReadSubExpr());
- E->setIdx(Reader.ReadSubExpr());
- E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+ E->setBase(Record.readSubExpr());
+ E->setIdx(Record.readSubExpr());
+ E->setRBracketLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
VisitExpr(E);
- E->setSourceRange(ReadSourceRange(Record, Idx));
- std::string UuidStr = ReadString(Record, Idx);
- E->setUuidStr(StringRef(UuidStr).copy(Reader.getContext()));
+ E->setSourceRange(ReadSourceRange());
+ std::string UuidStr = ReadString();
+ E->setUuidStr(StringRef(UuidStr).copy(Record.getContext()));
if (E->isTypeOperand()) { // __uuidof(ComType)
E->setTypeOperandSourceInfo(
- GetTypeSourceInfo(Record, Idx));
+ GetTypeSourceInfo());
return;
}
-
+
// __uuidof(expr)
- E->setExprOperand(Reader.ReadSubExpr());
+ E->setExprOperand(Record.readSubExpr());
}
void ASTStmtReader::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
VisitStmt(S);
- S->setLeaveLoc(ReadSourceLocation(Record, Idx));
+ S->setLeaveLoc(ReadSourceLocation());
}
void ASTStmtReader::VisitSEHExceptStmt(SEHExceptStmt *S) {
VisitStmt(S);
- S->Loc = ReadSourceLocation(Record, Idx);
- S->Children[SEHExceptStmt::FILTER_EXPR] = Reader.ReadSubStmt();
- S->Children[SEHExceptStmt::BLOCK] = Reader.ReadSubStmt();
+ S->Loc = ReadSourceLocation();
+ S->Children[SEHExceptStmt::FILTER_EXPR] = Record.readSubStmt();
+ S->Children[SEHExceptStmt::BLOCK] = Record.readSubStmt();
}
void ASTStmtReader::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
VisitStmt(S);
- S->Loc = ReadSourceLocation(Record, Idx);
- S->Block = Reader.ReadSubStmt();
+ S->Loc = ReadSourceLocation();
+ S->Block = Record.readSubStmt();
}
void ASTStmtReader::VisitSEHTryStmt(SEHTryStmt *S) {
VisitStmt(S);
- S->IsCXXTry = Record[Idx++];
- S->TryLoc = ReadSourceLocation(Record, Idx);
- S->Children[SEHTryStmt::TRY] = Reader.ReadSubStmt();
- S->Children[SEHTryStmt::HANDLER] = Reader.ReadSubStmt();
+ S->IsCXXTry = Record.readInt();
+ S->TryLoc = ReadSourceLocation();
+ S->Children[SEHTryStmt::TRY] = Record.readSubStmt();
+ S->Children[SEHTryStmt::HANDLER] = Record.readSubStmt();
}
//===----------------------------------------------------------------------===//
@@ -1744,7 +1716,7 @@ void ASTStmtReader::VisitSEHTryStmt(SEHTryStmt *S) {
void ASTStmtReader::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
VisitCallExpr(E);
- E->setConfig(cast<CallExpr>(Reader.ReadSubExpr()));
+ E->setConfig(cast<CallExpr>(Record.readSubExpr()));
}
//===----------------------------------------------------------------------===//
@@ -1752,9 +1724,9 @@ void ASTStmtReader::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
//===----------------------------------------------------------------------===//
void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
VisitExpr(E);
- E->BuiltinLoc = ReadSourceLocation(Record, Idx);
- E->RParenLoc = ReadSourceLocation(Record, Idx);
- E->SrcExpr = Reader.ReadSubExpr();
+ E->BuiltinLoc = ReadSourceLocation();
+ E->RParenLoc = ReadSourceLocation();
+ E->SrcExpr = Record.readSubExpr();
}
//===----------------------------------------------------------------------===//
@@ -1765,12 +1737,9 @@ namespace clang {
class OMPClauseReader : public OMPClauseVisitor<OMPClauseReader> {
ASTStmtReader *Reader;
ASTContext &Context;
- const ASTReader::RecordData &Record;
- unsigned &Idx;
public:
- OMPClauseReader(ASTStmtReader *R, ASTContext &C,
- const ASTReader::RecordData &Record, unsigned &Idx)
- : Reader(R), Context(C), Record(Record), Idx(Idx) { }
+ OMPClauseReader(ASTStmtReader *R, ASTRecordReader &Record)
+ : Reader(R), Context(Record.getContext()) {}
#define OPENMP_CLAUSE(Name, Class) void Visit##Class(Class *C);
#include "clang/Basic/OpenMPKinds.def"
OMPClause *readClause();
@@ -1781,7 +1750,7 @@ public:
OMPClause *OMPClauseReader::readClause() {
OMPClause *C;
- switch (Record[Idx++]) {
+ switch (Reader->Record.readInt()) {
case OMPC_if:
C = new (Context) OMPIfClause();
break;
@@ -1846,46 +1815,46 @@ OMPClause *OMPClauseReader::readClause() {
C = new (Context) OMPNogroupClause();
break;
case OMPC_private:
- C = OMPPrivateClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPPrivateClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_firstprivate:
- C = OMPFirstprivateClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPFirstprivateClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_lastprivate:
- C = OMPLastprivateClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPLastprivateClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_shared:
- C = OMPSharedClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPSharedClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_reduction:
- C = OMPReductionClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPReductionClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_linear:
- C = OMPLinearClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPLinearClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_aligned:
- C = OMPAlignedClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPAlignedClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_copyin:
- C = OMPCopyinClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPCopyinClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_copyprivate:
- C = OMPCopyprivateClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPCopyprivateClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_flush:
- C = OMPFlushClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPFlushClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_depend:
- C = OMPDependClause::CreateEmpty(Context, Record[Idx++]);
+ C = OMPDependClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_device:
C = new (Context) OMPDeviceClause();
break;
case OMPC_map: {
- unsigned NumVars = Record[Idx++];
- unsigned NumDeclarations = Record[Idx++];
- unsigned NumLists = Record[Idx++];
- unsigned NumComponents = Record[Idx++];
+ unsigned NumVars = Reader->Record.readInt();
+ unsigned NumDeclarations = Reader->Record.readInt();
+ unsigned NumLists = Reader->Record.readInt();
+ unsigned NumComponents = Reader->Record.readInt();
C = OMPMapClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
NumComponents);
break;
@@ -1915,112 +1884,124 @@ OMPClause *OMPClauseReader::readClause() {
C = new (Context) OMPDefaultmapClause();
break;
case OMPC_to: {
- unsigned NumVars = Record[Idx++];
- unsigned NumDeclarations = Record[Idx++];
- unsigned NumLists = Record[Idx++];
- unsigned NumComponents = Record[Idx++];
+ unsigned NumVars = Reader->Record.readInt();
+ unsigned NumDeclarations = Reader->Record.readInt();
+ unsigned NumLists = Reader->Record.readInt();
+ unsigned NumComponents = Reader->Record.readInt();
C = OMPToClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
NumComponents);
break;
}
case OMPC_from: {
- unsigned NumVars = Record[Idx++];
- unsigned NumDeclarations = Record[Idx++];
- unsigned NumLists = Record[Idx++];
- unsigned NumComponents = Record[Idx++];
+ unsigned NumVars = Reader->Record.readInt();
+ unsigned NumDeclarations = Reader->Record.readInt();
+ unsigned NumLists = Reader->Record.readInt();
+ unsigned NumComponents = Reader->Record.readInt();
C = OMPFromClause::CreateEmpty(Context, NumVars, NumDeclarations, NumLists,
NumComponents);
break;
}
- case OMPC_use_device_ptr:
- C = OMPUseDevicePtrClause::CreateEmpty(Context, Record[Idx++]);
+ case OMPC_use_device_ptr: {
+ unsigned NumVars = Reader->Record.readInt();
+ unsigned NumDeclarations = Reader->Record.readInt();
+ unsigned NumLists = Reader->Record.readInt();
+ unsigned NumComponents = Reader->Record.readInt();
+ C = OMPUseDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
+ NumLists, NumComponents);
break;
- case OMPC_is_device_ptr:
- C = OMPIsDevicePtrClause::CreateEmpty(Context, Record[Idx++]);
+ }
+ case OMPC_is_device_ptr: {
+ unsigned NumVars = Reader->Record.readInt();
+ unsigned NumDeclarations = Reader->Record.readInt();
+ unsigned NumLists = Reader->Record.readInt();
+ unsigned NumComponents = Reader->Record.readInt();
+ C = OMPIsDevicePtrClause::CreateEmpty(Context, NumVars, NumDeclarations,
+ NumLists, NumComponents);
break;
}
+ }
Visit(C);
- C->setLocStart(Reader->ReadSourceLocation(Record, Idx));
- C->setLocEnd(Reader->ReadSourceLocation(Record, Idx));
+ C->setLocStart(Reader->ReadSourceLocation());
+ C->setLocEnd(Reader->ReadSourceLocation());
return C;
}
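With the clause reader's own Record/Idx members gone (see the constructor above), every payload read now funnels through the statement reader's ASTRecordReader. The same hunk also upgrades use_device_ptr and is_device_ptr from a single variable count to the four-part sizing already used by map, to, and from:

  // Before: a bare element count, read through the reader's own cursor.
  //   C = OMPUseDevicePtrClause::CreateEmpty(Context, Record[Idx++]);
  // After: full mappable-expression sizing, read through Reader->Record.
  //   C = OMPUseDevicePtrClause::CreateEmpty(Context, NumVars,
  //                                          NumDeclarations, NumLists,
  //                                          NumComponents);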
void OMPClauseReader::VisitOMPClauseWithPreInit(OMPClauseWithPreInit *C) {
- C->setPreInitStmt(Reader->Reader.ReadSubStmt());
+ C->setPreInitStmt(Reader->Record.readSubStmt());
}
void OMPClauseReader::VisitOMPClauseWithPostUpdate(OMPClauseWithPostUpdate *C) {
VisitOMPClauseWithPreInit(C);
- C->setPostUpdateExpr(Reader->Reader.ReadSubExpr());
+ C->setPostUpdateExpr(Reader->Record.readSubExpr());
}
void OMPClauseReader::VisitOMPIfClause(OMPIfClause *C) {
- C->setNameModifier(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
- C->setNameModifierLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setCondition(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setNameModifier(static_cast<OpenMPDirectiveKind>(Reader->Record.readInt()));
+ C->setNameModifierLoc(Reader->ReadSourceLocation());
+ C->setColonLoc(Reader->ReadSourceLocation());
+ C->setCondition(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPFinalClause(OMPFinalClause *C) {
- C->setCondition(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setCondition(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPNumThreadsClause(OMPNumThreadsClause *C) {
- C->setNumThreads(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setNumThreads(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPSafelenClause(OMPSafelenClause *C) {
- C->setSafelen(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setSafelen(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
- C->setSimdlen(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setSimdlen(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPCollapseClause(OMPCollapseClause *C) {
- C->setNumForLoops(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setNumForLoops(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPDefaultClause(OMPDefaultClause *C) {
C->setDefaultKind(
- static_cast<OpenMPDefaultClauseKind>(Record[Idx++]));
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setDefaultKindKwLoc(Reader->ReadSourceLocation(Record, Idx));
+ static_cast<OpenMPDefaultClauseKind>(Reader->Record.readInt()));
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setDefaultKindKwLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPProcBindClause(OMPProcBindClause *C) {
C->setProcBindKind(
- static_cast<OpenMPProcBindClauseKind>(Record[Idx++]));
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setProcBindKindKwLoc(Reader->ReadSourceLocation(Record, Idx));
+ static_cast<OpenMPProcBindClauseKind>(Reader->Record.readInt()));
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setProcBindKindKwLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPScheduleClause(OMPScheduleClause *C) {
VisitOMPClauseWithPreInit(C);
C->setScheduleKind(
- static_cast<OpenMPScheduleClauseKind>(Record[Idx++]));
+ static_cast<OpenMPScheduleClauseKind>(Reader->Record.readInt()));
C->setFirstScheduleModifier(
- static_cast<OpenMPScheduleClauseModifier>(Record[Idx++]));
+ static_cast<OpenMPScheduleClauseModifier>(Reader->Record.readInt()));
C->setSecondScheduleModifier(
- static_cast<OpenMPScheduleClauseModifier>(Record[Idx++]));
- C->setChunkSize(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setFirstScheduleModifierLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setSecondScheduleModifierLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setScheduleKindLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setCommaLoc(Reader->ReadSourceLocation(Record, Idx));
+ static_cast<OpenMPScheduleClauseModifier>(Reader->Record.readInt()));
+ C->setChunkSize(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setFirstScheduleModifierLoc(Reader->ReadSourceLocation());
+ C->setSecondScheduleModifierLoc(Reader->ReadSourceLocation());
+ C->setScheduleKindLoc(Reader->ReadSourceLocation());
+ C->setCommaLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
- C->setNumForLoops(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setNumForLoops(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPNowaitClause(OMPNowaitClause *) {}
@@ -2046,83 +2027,82 @@ void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
void OMPClauseReader::VisitOMPPrivateClause(OMPPrivateClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setPrivateCopies(Vars);
}
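
The variable-list clauses repeat an identical counted read loop once per stored array (var refs, private copies, inits, and so on). A hypothetical helper, not part of this patch, that the cursor-style API would allow:

    #include "clang/AST/Expr.h"
    #include "llvm/ADT/SmallVector.h"

    // Read N sub-expressions with any nullary reader callable; the clause
    // setters then adopt the resulting array. Illustrative refactoring only.
    template <typename ReadFn>
    static llvm::SmallVector<clang::Expr *, 16> readExprList(unsigned N,
                                                             ReadFn Read) {
      llvm::SmallVector<clang::Expr *, 16> Vars;
      Vars.reserve(N);
      for (unsigned I = 0; I != N; ++I)
        Vars.push_back(Read());
      return Vars;
    }

With it, each loop above would collapse to something like C->setVarRefs(readExprList(NumVars, [&] { return Reader->Record.readSubExpr(); })).
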
void OMPClauseReader::VisitOMPFirstprivateClause(OMPFirstprivateClause *C) {
VisitOMPClauseWithPreInit(C);
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setPrivateCopies(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setInits(Vars);
}
void OMPClauseReader::VisitOMPLastprivateClause(OMPLastprivateClause *C) {
VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setPrivateCopies(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setSourceExprs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setDestinationExprs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setAssignmentOps(Vars);
}
void OMPClauseReader::VisitOMPSharedClause(OMPSharedClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
}
void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
- NestedNameSpecifierLoc NNSL =
- Reader->Reader.ReadNestedNameSpecifierLoc(Reader->F, Record, Idx);
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setColonLoc(Reader->ReadSourceLocation());
+ NestedNameSpecifierLoc NNSL = Reader->Record.readNestedNameSpecifierLoc();
DeclarationNameInfo DNI;
- Reader->ReadDeclarationNameInfo(DNI, Record, Idx);
+ Reader->ReadDeclarationNameInfo(DNI);
C->setQualifierLoc(NNSL);
C->setNameInfo(DNI);
@@ -2130,151 +2110,152 @@ void OMPClauseReader::VisitOMPReductionClause(OMPReductionClause *C) {
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setPrivates(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setLHSExprs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setRHSExprs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setReductionOps(Vars);
}
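
The reduction clause also shows why each visitor reads fields in one fixed sequence: the record is a flat integer stream, so reader and writer must agree positionally on every field, including the qualifier and name info read above. A self-contained demonstration of the failure mode (toy buffer, not the clang types):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      // Writer order: lparen loc, colon loc (encoded here as raw integers).
      std::vector<uint64_t> Rec = {42, 7};
      unsigned Idx = 0;
      uint64_t LParen = Rec[Idx++]; // must be read first, matching the writer
      uint64_t Colon = Rec[Idx++];  // swapping these reads mis-assigns both
      assert(LParen == 42 && Colon == 7);
      return 0;
    }
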
void OMPClauseReader::VisitOMPLinearClause(OMPLinearClause *C) {
VisitOMPClauseWithPostUpdate(C);
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setModifier(static_cast<OpenMPLinearClauseKind>(Record[Idx++]));
- C->setModifierLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setColonLoc(Reader->ReadSourceLocation());
+ C->setModifier(static_cast<OpenMPLinearClauseKind>(Reader->Record.readInt()));
+ C->setModifierLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setPrivates(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setInits(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setUpdates(Vars);
Vars.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setFinals(Vars);
- C->setStep(Reader->Reader.ReadSubExpr());
- C->setCalcStep(Reader->Reader.ReadSubExpr());
+ C->setStep(Reader->Record.readSubExpr());
+ C->setCalcStep(Reader->Record.readSubExpr());
}
void OMPClauseReader::VisitOMPAlignedClause(OMPAlignedClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setColonLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
- C->setAlignment(Reader->Reader.ReadSubExpr());
+ C->setAlignment(Reader->Record.readSubExpr());
}
void OMPClauseReader::VisitOMPCopyinClause(OMPCopyinClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Exprs;
Exprs.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Exprs);
Exprs.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setSourceExprs(Exprs);
Exprs.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setDestinationExprs(Exprs);
Exprs.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setAssignmentOps(Exprs);
}
void OMPClauseReader::VisitOMPCopyprivateClause(OMPCopyprivateClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Exprs;
Exprs.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Exprs);
Exprs.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setSourceExprs(Exprs);
Exprs.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setDestinationExprs(Exprs);
Exprs.clear();
for (unsigned i = 0; i != NumVars; ++i)
- Exprs.push_back(Reader->Reader.ReadSubExpr());
+ Exprs.push_back(Reader->Record.readSubExpr());
C->setAssignmentOps(Exprs);
}
void OMPClauseReader::VisitOMPFlushClause(OMPFlushClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
}
void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setDependencyKind(static_cast<OpenMPDependClauseKind>(Record[Idx++]));
- C->setDependencyLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setDependencyKind(
+ static_cast<OpenMPDependClauseKind>(Reader->Record.readInt()));
+ C->setDependencyLoc(Reader->ReadSourceLocation());
+ C->setColonLoc(Reader->ReadSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
- C->setCounterValue(Reader->Reader.ReadSubExpr());
+ C->setCounterValue(Reader->Record.readSubExpr());
}
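
Enumerated clause fields travel through the record as plain integers, hence the static_cast back from readInt(). A minimal round-trip sketch (toy enum, not the real OpenMP kind enums):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    enum class DepKind : uint64_t { In, Out, InOut };

    int main() {
      std::vector<uint64_t> Rec;
      Rec.push_back(static_cast<uint64_t>(DepKind::InOut)); // writer side
      unsigned Idx = 0;
      auto K = static_cast<DepKind>(Rec[Idx++]);            // reader side
      assert(K == DepKind::InOut); // safe only while both sides share the enum
      return 0;
    }
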
void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {
- C->setDevice(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setDevice(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
C->setMapTypeModifier(
- static_cast<OpenMPMapClauseKind>(Record[Idx++]));
+ static_cast<OpenMPMapClauseKind>(Reader->Record.readInt()));
C->setMapType(
- static_cast<OpenMPMapClauseKind>(Record[Idx++]));
- C->setMapLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setColonLoc(Reader->ReadSourceLocation(Record, Idx));
+ static_cast<OpenMPMapClauseKind>(Reader->Record.readInt()));
+ C->setMapLoc(Reader->ReadSourceLocation());
+ C->setColonLoc(Reader->ReadSourceLocation());
auto NumVars = C->varlist_size();
auto UniqueDecls = C->getUniqueDeclarationsNum();
auto TotalLists = C->getTotalComponentListNum();
@@ -2283,34 +2264,32 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
SmallVector<ValueDecl *, 16> Decls;
Decls.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(
- Reader->Reader.ReadDeclAs<ValueDecl>(Reader->F, Record, Idx));
+ Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
C->setUniqueDecls(Decls);
SmallVector<unsigned, 16> ListsPerDecl;
ListsPerDecl.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Record[Idx++]);
+ ListsPerDecl.push_back(Reader->Record.readInt());
C->setDeclNumLists(ListsPerDecl);
SmallVector<unsigned, 32> ListSizes;
ListSizes.reserve(TotalLists);
for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Record[Idx++]);
+ ListSizes.push_back(Reader->Record.readInt());
C->setComponentListSizes(ListSizes);
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Reader.ReadSubExpr();
- ValueDecl *AssociatedDecl =
- Reader->Reader.ReadDeclAs<ValueDecl>(Reader->F, Record, Idx);
+ Expr *AssociatedExpr = Reader->Record.readSubExpr();
+ ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2318,57 +2297,57 @@ void OMPClauseReader::VisitOMPMapClause(OMPMapClause *C) {
}
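
Map-style clauses serialize a three-level structure: per unique declaration, how many component lists it owns; per list, its length; then the flat stream of components. A self-contained model of regrouping that flat stream, which is the shape setComponents(Components, ListSizes) consumes (plain STL stand-ins for the clang types):

    #include <cstddef>
    #include <vector>

    struct Component { int Expr; int Decl; }; // stand-ins for Expr*/ValueDecl*

    // Rebuild the per-list grouping from the flat component stream using the
    // per-list sizes read just before it.
    std::vector<std::vector<Component>>
    regroup(const std::vector<Component> &Flat,
            const std::vector<unsigned> &ListSizes) {
      std::vector<std::vector<Component>> Lists;
      std::size_t Pos = 0;
      for (unsigned Size : ListSizes) {
        Lists.emplace_back(Flat.begin() + Pos, Flat.begin() + Pos + Size);
        Pos += Size;
      }
      return Lists;
    }
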
void OMPClauseReader::VisitOMPNumTeamsClause(OMPNumTeamsClause *C) {
- C->setNumTeams(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setNumTeams(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPThreadLimitClause(OMPThreadLimitClause *C) {
- C->setThreadLimit(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setThreadLimit(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPPriorityClause(OMPPriorityClause *C) {
- C->setPriority(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setPriority(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPGrainsizeClause(OMPGrainsizeClause *C) {
- C->setGrainsize(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setGrainsize(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPNumTasksClause(OMPNumTasksClause *C) {
- C->setNumTasks(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setNumTasks(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPHintClause(OMPHintClause *C) {
- C->setHint(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setHint(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPDistScheduleClause(OMPDistScheduleClause *C) {
VisitOMPClauseWithPreInit(C);
C->setDistScheduleKind(
- static_cast<OpenMPDistScheduleClauseKind>(Record[Idx++]));
- C->setChunkSize(Reader->Reader.ReadSubExpr());
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setDistScheduleKindLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setCommaLoc(Reader->ReadSourceLocation(Record, Idx));
+ static_cast<OpenMPDistScheduleClauseKind>(Reader->Record.readInt()));
+ C->setChunkSize(Reader->Record.readSubExpr());
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setDistScheduleKindLoc(Reader->ReadSourceLocation());
+ C->setCommaLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPDefaultmapClause(OMPDefaultmapClause *C) {
C->setDefaultmapKind(
- static_cast<OpenMPDefaultmapClauseKind>(Record[Idx++]));
+ static_cast<OpenMPDefaultmapClauseKind>(Reader->Record.readInt()));
C->setDefaultmapModifier(
- static_cast<OpenMPDefaultmapClauseModifier>(Record[Idx++]));
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setDefaultmapModifierLoc(Reader->ReadSourceLocation(Record, Idx));
- C->setDefaultmapKindLoc(Reader->ReadSourceLocation(Record, Idx));
+ static_cast<OpenMPDefaultmapClauseModifier>(Reader->Record.readInt()));
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ C->setDefaultmapModifierLoc(Reader->ReadSourceLocation());
+ C->setDefaultmapKindLoc(Reader->ReadSourceLocation());
}
void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
auto NumVars = C->varlist_size();
auto UniqueDecls = C->getUniqueDeclarationsNum();
auto TotalLists = C->getTotalComponentListNum();
@@ -2377,34 +2356,32 @@ void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
SmallVector<ValueDecl *, 16> Decls;
Decls.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(
- Reader->Reader.ReadDeclAs<ValueDecl>(Reader->F, Record, Idx));
+ Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
C->setUniqueDecls(Decls);
SmallVector<unsigned, 16> ListsPerDecl;
ListsPerDecl.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Record[Idx++]);
+ ListsPerDecl.push_back(Reader->Record.readInt());
C->setDeclNumLists(ListsPerDecl);
SmallVector<unsigned, 32> ListSizes;
ListSizes.reserve(TotalLists);
for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Record[Idx++]);
+ ListSizes.push_back(Reader->Record.readInt());
C->setComponentListSizes(ListSizes);
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Reader.ReadSubExpr();
- ValueDecl *AssociatedDecl =
- Reader->Reader.ReadDeclAs<ValueDecl>(Reader->F, Record, Idx);
+ Expr *AssociatedExpr = Reader->Record.readSubExpr();
+ ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2412,7 +2389,7 @@ void OMPClauseReader::VisitOMPToClause(OMPToClause *C) {
}
void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
+ C->setLParenLoc(Reader->ReadSourceLocation());
auto NumVars = C->varlist_size();
auto UniqueDecls = C->getUniqueDeclarationsNum();
auto TotalLists = C->getTotalComponentListNum();
@@ -2421,34 +2398,32 @@ void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
SmallVector<ValueDecl *, 16> Decls;
Decls.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
- Decls.push_back(
- Reader->Reader.ReadDeclAs<ValueDecl>(Reader->F, Record, Idx));
+ Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
C->setUniqueDecls(Decls);
SmallVector<unsigned, 16> ListsPerDecl;
ListsPerDecl.reserve(UniqueDecls);
for (unsigned i = 0; i < UniqueDecls; ++i)
- ListsPerDecl.push_back(Record[Idx++]);
+ ListsPerDecl.push_back(Reader->Record.readInt());
C->setDeclNumLists(ListsPerDecl);
SmallVector<unsigned, 32> ListSizes;
ListSizes.reserve(TotalLists);
for (unsigned i = 0; i < TotalLists; ++i)
- ListSizes.push_back(Record[Idx++]);
+ ListSizes.push_back(Reader->Record.readInt());
C->setComponentListSizes(ListSizes);
SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
Components.reserve(TotalComponents);
for (unsigned i = 0; i < TotalComponents; ++i) {
- Expr *AssociatedExpr = Reader->Reader.ReadSubExpr();
- ValueDecl *AssociatedDecl =
- Reader->Reader.ReadDeclAs<ValueDecl>(Reader->F, Record, Idx);
+ Expr *AssociatedExpr = Reader->Record.readSubExpr();
+ ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
AssociatedExpr, AssociatedDecl));
}
@@ -2456,101 +2431,172 @@ void OMPClauseReader::VisitOMPFromClause(OMPFromClause *C) {
}
void OMPClauseReader::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- unsigned NumVars = C->varlist_size();
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Record.readSubExpr());
+ C->setPrivateCopies(Vars);
+ Vars.clear();
+ for (unsigned i = 0; i != NumVars; ++i)
+ Vars.push_back(Reader->Record.readSubExpr());
+ C->setInits(Vars);
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Reader->Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Reader->Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Reader->Record.readSubExpr();
+ ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
}
void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
- C->setLParenLoc(Reader->ReadSourceLocation(Record, Idx));
- unsigned NumVars = C->varlist_size();
+ C->setLParenLoc(Reader->ReadSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
- Vars.push_back(Reader->Reader.ReadSubExpr());
+ Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
Vars.clear();
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ Decls.push_back(Reader->Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned i = 0; i < UniqueDecls; ++i)
+ ListsPerDecl.push_back(Reader->Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Reader->Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned i = 0; i < TotalComponents; ++i) {
+ Expr *AssociatedExpr = Reader->Record.readSubExpr();
+ ValueDecl *AssociatedDecl = Reader->Record.readDeclAs<ValueDecl>();
+ Components.push_back(OMPClauseMappableExprCommon::MappableComponent(
+ AssociatedExpr, AssociatedDecl));
+ }
+ C->setComponents(Components, ListSizes);
}
//===----------------------------------------------------------------------===//
// OpenMP Directives.
//===----------------------------------------------------------------------===//
void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
- E->setLocStart(ReadSourceLocation(Record, Idx));
- E->setLocEnd(ReadSourceLocation(Record, Idx));
- OMPClauseReader ClauseReader(this, Reader.getContext(), Record, Idx);
+ E->setLocStart(ReadSourceLocation());
+ E->setLocEnd(ReadSourceLocation());
+ OMPClauseReader ClauseReader(this, Record);
SmallVector<OMPClause *, 5> Clauses;
for (unsigned i = 0; i < E->getNumClauses(); ++i)
Clauses.push_back(ClauseReader.readClause());
E->setClauses(Clauses);
if (E->hasAssociatedStmt())
- E->setAssociatedStmt(Reader.ReadSubStmt());
+ E->setAssociatedStmt(Record.readSubStmt());
}
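
The clause reader's constructor shrinks to match: where it previously took the statement reader, the ASTContext, the raw buffer, and the index, it now takes just the statement reader and the shared record cursor. Sketched with the toy cursor from earlier (hypothetical signatures mirroring the call above):

    class RecordCursor;      // as modeled in the earlier sketch
    struct StmtReaderSketch; // stands in for ASTStmtReader

    // Before: OMPClauseReader(this, Reader.getContext(), Record, Idx)
    // After:  OMPClauseReader(this, Record)
    class ClauseReaderSketch {
      StmtReaderSketch *Stmt; // back-pointer, as in the "Reader->" calls above
      RecordCursor &Record;   // shared cursor; context is reachable through it

    public:
      ClauseReaderSketch(StmtReaderSketch *S, RecordCursor &R)
          : Stmt(S), Record(R) {}
    };
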
void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
VisitStmt(D);
// Two fields (NumClauses and CollapsedNum) were read in ReadStmtFromStream.
- Idx += 2;
+ Record.skipInts(2);
VisitOMPExecutableDirective(D);
- D->setIterationVariable(Reader.ReadSubExpr());
- D->setLastIteration(Reader.ReadSubExpr());
- D->setCalcLastIteration(Reader.ReadSubExpr());
- D->setPreCond(Reader.ReadSubExpr());
- D->setCond(Reader.ReadSubExpr());
- D->setInit(Reader.ReadSubExpr());
- D->setInc(Reader.ReadSubExpr());
- D->setPreInits(Reader.ReadSubStmt());
+ D->setIterationVariable(Record.readSubExpr());
+ D->setLastIteration(Record.readSubExpr());
+ D->setCalcLastIteration(Record.readSubExpr());
+ D->setPreCond(Record.readSubExpr());
+ D->setCond(Record.readSubExpr());
+ D->setInit(Record.readSubExpr());
+ D->setInc(Record.readSubExpr());
+ D->setPreInits(Record.readSubStmt());
if (isOpenMPWorksharingDirective(D->getDirectiveKind()) ||
isOpenMPTaskLoopDirective(D->getDirectiveKind()) ||
isOpenMPDistributeDirective(D->getDirectiveKind())) {
- D->setIsLastIterVariable(Reader.ReadSubExpr());
- D->setLowerBoundVariable(Reader.ReadSubExpr());
- D->setUpperBoundVariable(Reader.ReadSubExpr());
- D->setStrideVariable(Reader.ReadSubExpr());
- D->setEnsureUpperBound(Reader.ReadSubExpr());
- D->setNextLowerBound(Reader.ReadSubExpr());
- D->setNextUpperBound(Reader.ReadSubExpr());
- D->setNumIterations(Reader.ReadSubExpr());
+ D->setIsLastIterVariable(Record.readSubExpr());
+ D->setLowerBoundVariable(Record.readSubExpr());
+ D->setUpperBoundVariable(Record.readSubExpr());
+ D->setStrideVariable(Record.readSubExpr());
+ D->setEnsureUpperBound(Record.readSubExpr());
+ D->setNextLowerBound(Record.readSubExpr());
+ D->setNextUpperBound(Record.readSubExpr());
+ D->setNumIterations(Record.readSubExpr());
}
if (isOpenMPLoopBoundSharingDirective(D->getDirectiveKind())) {
- D->setPrevLowerBoundVariable(Reader.ReadSubExpr());
- D->setPrevUpperBoundVariable(Reader.ReadSubExpr());
+ D->setPrevLowerBoundVariable(Record.readSubExpr());
+ D->setPrevUpperBoundVariable(Record.readSubExpr());
}
SmallVector<Expr *, 4> Sub;
unsigned CollapsedNum = D->getCollapsedNumber();
Sub.reserve(CollapsedNum);
for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Reader.ReadSubExpr());
+ Sub.push_back(Record.readSubExpr());
D->setCounters(Sub);
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Reader.ReadSubExpr());
+ Sub.push_back(Record.readSubExpr());
D->setPrivateCounters(Sub);
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Reader.ReadSubExpr());
+ Sub.push_back(Record.readSubExpr());
D->setInits(Sub);
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Reader.ReadSubExpr());
+ Sub.push_back(Record.readSubExpr());
D->setUpdates(Sub);
Sub.clear();
for (unsigned i = 0; i < CollapsedNum; ++i)
- Sub.push_back(Reader.ReadSubExpr());
+ Sub.push_back(Record.readSubExpr());
D->setFinals(Sub);
}
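
Idx += 2 becomes Record.skipInts(2) for a subtle reason: the allocation code in ReadStmtFromStream peeks NumClauses and CollapsedNum by absolute offset to size the empty node, and the visitor must then step the cursor past those already-consumed fields before reading its own. The handshake in miniature (toy buffer):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      // Suppose the statement-prefix fields end at index 2; the directive
      // then stores NumClauses and CollapsedNum before its expressions.
      const unsigned NumStmtFields = 2;
      std::vector<uint64_t> Rec = {/*stmt*/ 0, 0, /*NumClauses*/ 3,
                                   /*CollapsedNum*/ 1, /*first expr*/ 99};

      // Allocation site: peek by absolute offset, cursor untouched.
      uint64_t NumClauses = Rec[NumStmtFields];
      uint64_t CollapsedNum = Rec[NumStmtFields + 1];

      // Visitor: consume the prefix, then skip the two peeked fields.
      unsigned Idx = NumStmtFields; // position after VisitStmt
      Idx += 2;                     // i.e. Record.skipInts(2)
      assert(NumClauses == 3 && CollapsedNum == 1 && Rec[Idx] == 99);
      return 0;
    }
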
void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
@@ -2559,7 +2605,7 @@ void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
@@ -2569,21 +2615,21 @@ void ASTStmtReader::VisitOMPForSimdDirective(OMPForSimdDirective *D) {
void ASTStmtReader::VisitOMPSectionsDirective(OMPSectionsDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPSectionDirective(OMPSectionDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPSingleDirective(OMPSingleDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
@@ -2595,14 +2641,14 @@ void ASTStmtReader::VisitOMPMasterDirective(OMPMasterDirective *D) {
void ASTStmtReader::VisitOMPCriticalDirective(OMPCriticalDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- ReadDeclarationNameInfo(D->DirName, Record, Idx);
+ ReadDeclarationNameInfo(D->DirName);
}
void ASTStmtReader::VisitOMPParallelForDirective(OMPParallelForDirective *D) {
VisitOMPLoopDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPParallelForSimdDirective(
@@ -2614,17 +2660,17 @@ void ASTStmtReader::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPTaskDirective(OMPTaskDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPTaskyieldDirective(OMPTaskyieldDirective *D) {
@@ -2650,74 +2696,74 @@ void ASTStmtReader::VisitOMPTaskgroupDirective(OMPTaskgroupDirective *D) {
void ASTStmtReader::VisitOMPFlushDirective(OMPFlushDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setX(Reader.ReadSubExpr());
- D->setV(Reader.ReadSubExpr());
- D->setExpr(Reader.ReadSubExpr());
- D->setUpdateExpr(Reader.ReadSubExpr());
- D->IsXLHSInRHSPart = Record[Idx++] != 0;
- D->IsPostfixUpdate = Record[Idx++] != 0;
+ D->setX(Record.readSubExpr());
+ D->setV(Record.readSubExpr());
+ D->setExpr(Record.readSubExpr());
+ D->setUpdateExpr(Record.readSubExpr());
+ D->IsXLHSInRHSPart = Record.readInt() != 0;
+ D->IsPostfixUpdate = Record.readInt() != 0;
}
void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetDataDirective(OMPTargetDataDirective *D) {
VisitStmt(D);
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetEnterDataDirective(
OMPTargetEnterDataDirective *D) {
VisitStmt(D);
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetExitDataDirective(
OMPTargetExitDataDirective *D) {
VisitStmt(D);
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetParallelDirective(
OMPTargetParallelDirective *D) {
VisitStmt(D);
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPTargetParallelForDirective(
OMPTargetParallelForDirective *D) {
VisitOMPLoopDirective(D);
- D->setHasCancel(Record[Idx++]);
+ D->setHasCancel(Record.readInt());
}
void ASTStmtReader::VisitOMPTeamsDirective(OMPTeamsDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
@@ -2725,15 +2771,15 @@ void ASTStmtReader::VisitOMPCancellationPointDirective(
OMPCancellationPointDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
+ D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record.readInt()));
}
void ASTStmtReader::VisitOMPCancelDirective(OMPCancelDirective *D) {
VisitStmt(D);
// The NumClauses field was read in ReadStmtFromStream.
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
- D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record[Idx++]));
+ D->setCancelRegion(static_cast<OpenMPDirectiveKind>(Record.readInt()));
}
void ASTStmtReader::VisitOMPTaskLoopDirective(OMPTaskLoopDirective *D) {
@@ -2750,7 +2796,7 @@ void ASTStmtReader::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
void ASTStmtReader::VisitOMPTargetUpdateDirective(OMPTargetUpdateDirective *D) {
VisitStmt(D);
- ++Idx;
+ Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
void ASTStmtReader::VisitOMPDistributeParallelForDirective(
@@ -2773,6 +2819,47 @@ void ASTStmtReader::VisitOMPTargetParallelForSimdDirective(
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPTargetSimdDirective(OMPTargetSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTeamsDistributeDirective(
+ OMPTeamsDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTeamsDistributeSimdDirective(
+ OMPTeamsDistributeSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTeamsDistributeParallelForSimdDirective(
+ OMPTeamsDistributeParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTeamsDistributeParallelForDirective(
+ OMPTeamsDistributeParallelForDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetTeamsDirective(OMPTargetTeamsDirective *D) {
+ VisitStmt(D);
+ // The NumClauses field was read in ReadStmtFromStream.
+ Record.skipInts(1);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetTeamsDistributeDirective(
+ OMPTargetTeamsDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetTeamsDistributeParallelForDirective(
+ OMPTargetTeamsDistributeParallelForDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -2810,7 +2897,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
ReadingKindTracker ReadingKind(Read_Stmt, *this);
llvm::BitstreamCursor &Cursor = F.DeclsCursor;
-
+
// Map of offset to previously deserialized stmt. The offset points
// just after the stmt record.
llvm::DenseMap<uint64_t, Stmt *> StmtEntries;
@@ -2819,14 +2906,13 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
unsigned PrevNumStmts = StmtStack.size();
#endif
- RecordData Record;
- unsigned Idx;
- ASTStmtReader Reader(*this, F, Cursor, Record, Idx);
+ ASTRecordReader Record(*this, F);
+ ASTStmtReader Reader(Record, Cursor);
Stmt::EmptyShell Empty;
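
Hoisting the record state into ASTRecordReader is also what lets the per-iteration "Idx = 0; Record.clear();" reset disappear below: readRecord() refills the buffer and rewinds the cursor in one call. A minimal model of that method (hypothetical body; the real decoding comes from the bitstream cursor):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    class RecordReaderSketch {
      std::vector<uint64_t> Buf;
      unsigned Idx = 0;

    public:
      // Refill the buffer for the next record and rewind; returns the record
      // code. The actual stream decoding is elided in this sketch.
      unsigned readRecord(/* BitstreamCursor &Cursor, unsigned AbbrevID */) {
        Buf.clear();
        Idx = 0;
        // Buf would be populated from the bitstream here.
        return 0; // placeholder record code
      }
      uint64_t readInt() { return Buf[Idx++]; }
      unsigned getIdx() const { return Idx; }
      std::size_t size() const { return Buf.size(); }
    };
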
while (true) {
llvm::BitstreamEntry Entry = Cursor.advanceSkippingSubblocks();
-
+
switch (Entry.Kind) {
case llvm::BitstreamEntry::SubBlock: // Handled for us already.
case llvm::BitstreamEntry::Error:
@@ -2840,11 +2926,9 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
}
Stmt *S = nullptr;
- Idx = 0;
- Record.clear();
bool Finished = false;
bool IsStmtReference = false;
- switch ((StmtCode)Cursor.readRecord(Entry.ID, Record)) {
+ switch ((StmtCode)Record.readRecord(Cursor, Entry.ID)) {
case STMT_STOP:
Finished = true;
break;
@@ -2853,7 +2937,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
IsStmtReference = true;
assert(StmtEntries.find(Record[0]) != StmtEntries.end() &&
"No stmt was recorded for this offset reference!");
- S = StmtEntries[Record[Idx++]];
+ S = StmtEntries[Record.readInt()];
break;
case STMT_NULL_PTR:
@@ -2991,11 +3075,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
case EXPR_OFFSETOF:
- S = OffsetOfExpr::CreateEmpty(Context,
+ S = OffsetOfExpr::CreateEmpty(Context,
Record[ASTStmtReader::NumExprFields],
Record[ASTStmtReader::NumExprFields + 1]);
break;
-
+
case EXPR_SIZEOF_ALIGN_OF:
S = new (Context) UnaryExprOrTypeTraitExpr(Empty);
break;
@@ -3017,46 +3101,46 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
// That way we can use MemberExpr::Create and don't have to duplicate its
// logic with a MemberExpr::CreateEmpty.
- assert(Idx == 0);
+ assert(Record.getIdx() == 0);
NestedNameSpecifierLoc QualifierLoc;
- if (Record[Idx++]) { // HasQualifier.
- QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
+ if (Record.readInt()) { // HasQualifier.
+ QualifierLoc = Record.readNestedNameSpecifierLoc();
}
SourceLocation TemplateKWLoc;
TemplateArgumentListInfo ArgInfo;
- bool HasTemplateKWAndArgsInfo = Record[Idx++];
+ bool HasTemplateKWAndArgsInfo = Record.readInt();
if (HasTemplateKWAndArgsInfo) {
- TemplateKWLoc = ReadSourceLocation(F, Record, Idx);
- unsigned NumTemplateArgs = Record[Idx++];
- ArgInfo.setLAngleLoc(ReadSourceLocation(F, Record, Idx));
- ArgInfo.setRAngleLoc(ReadSourceLocation(F, Record, Idx));
+ TemplateKWLoc = Record.readSourceLocation();
+ unsigned NumTemplateArgs = Record.readInt();
+ ArgInfo.setLAngleLoc(Record.readSourceLocation());
+ ArgInfo.setRAngleLoc(Record.readSourceLocation());
for (unsigned i = 0; i != NumTemplateArgs; ++i)
- ArgInfo.addArgument(ReadTemplateArgumentLoc(F, Record, Idx));
+ ArgInfo.addArgument(Record.readTemplateArgumentLoc());
}
- bool HadMultipleCandidates = Record[Idx++];
+ bool HadMultipleCandidates = Record.readInt();
- NamedDecl *FoundD = ReadDeclAs<NamedDecl>(F, Record, Idx);
- AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ NamedDecl *FoundD = Record.readDeclAs<NamedDecl>();
+ AccessSpecifier AS = (AccessSpecifier)Record.readInt();
DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS);
- QualType T = readType(F, Record, Idx);
- ExprValueKind VK = static_cast<ExprValueKind>(Record[Idx++]);
- ExprObjectKind OK = static_cast<ExprObjectKind>(Record[Idx++]);
+ QualType T = Record.readType();
+ ExprValueKind VK = static_cast<ExprValueKind>(Record.readInt());
+ ExprObjectKind OK = static_cast<ExprObjectKind>(Record.readInt());
Expr *Base = ReadSubExpr();
- ValueDecl *MemberD = ReadDeclAs<ValueDecl>(F, Record, Idx);
- SourceLocation MemberLoc = ReadSourceLocation(F, Record, Idx);
+ ValueDecl *MemberD = Record.readDeclAs<ValueDecl>();
+ SourceLocation MemberLoc = Record.readSourceLocation();
DeclarationNameInfo MemberNameInfo(MemberD->getDeclName(), MemberLoc);
- bool IsArrow = Record[Idx++];
- SourceLocation OperatorLoc = ReadSourceLocation(F, Record, Idx);
+ bool IsArrow = Record.readInt();
+ SourceLocation OperatorLoc = Record.readSourceLocation();
S = MemberExpr::Create(Context, Base, IsArrow, OperatorLoc, QualifierLoc,
TemplateKWLoc, MemberD, FoundDecl, MemberNameInfo,
HasTemplateKWAndArgsInfo ? &ArgInfo : nullptr, T,
VK, OK);
- ReadDeclarationNameLoc(F, cast<MemberExpr>(S)->MemberDNLoc,
- MemberD->getDeclName(), Record, Idx);
+ Record.readDeclarationNameLoc(cast<MemberExpr>(S)->MemberDNLoc,
+ MemberD->getDeclName());
if (HadMultipleCandidates)
cast<MemberExpr>(S)->setHadMultipleCandidates(true);
break;
@@ -3118,6 +3202,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) NoInitExpr(Empty);
break;
+ case EXPR_ARRAY_INIT_LOOP:
+ S = new (Context) ArrayInitLoopExpr(Empty);
+ break;
+
+ case EXPR_ARRAY_INIT_INDEX:
+ S = new (Context) ArrayInitIndexExpr(Empty);
+ break;
+
case EXPR_VA_ARG:
S = new (Context) VAArgExpr(Empty);
break;
@@ -3213,7 +3305,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = new (Context) ObjCAtFinallyStmt(Empty);
break;
case STMT_OBJC_AT_TRY:
- S = ObjCAtTryStmt::CreateEmpty(Context,
+ S = ObjCAtTryStmt::CreateEmpty(Context,
Record[ASTStmtReader::NumStmtFields],
Record[ASTStmtReader::NumStmtFields + 1]);
break;
@@ -3484,6 +3576,68 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_TARGET_SIMD_DIRECTIVE: {
+ auto NumClauses = Record[ASTStmtReader::NumStmtFields];
+ auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetSimdDirective::CreateEmpty(Context, NumClauses, CollapsedNum,
+ Empty);
+ break;
+ }
+
+ case STMT_OMP_TEAMS_DISTRIBUTE_DIRECTIVE: {
+ auto NumClauses = Record[ASTStmtReader::NumStmtFields];
+ auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTeamsDistributeDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE: {
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields];
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTeamsDistributeSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE: {
+ auto NumClauses = Record[ASTStmtReader::NumStmtFields];
+ auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTeamsDistributeParallelForSimdDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE: {
+ auto NumClauses = Record[ASTStmtReader::NumStmtFields];
+ auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTeamsDistributeParallelForDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TARGET_TEAMS_DIRECTIVE: {
+ S = OMPTargetTeamsDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+ }
+
+ case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_DIRECTIVE: {
+ auto NumClauses = Record[ASTStmtReader::NumStmtFields];
+ auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetTeamsDistributeDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE: {
+ auto NumClauses = Record[ASTStmtReader::NumStmtFields];
+ auto CollapsedNum = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetTeamsDistributeParallelForDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
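
All of the new directive cases share one shape: peek the clause and collapse counts from fixed offsets in the freshly read record, allocate an empty shell sized for them, and let the visitor fill it afterwards. Two-phase deserialization in miniature (toy node; the real CreateEmpty functions also reserve trailing storage):

    #include <cassert>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct DirectiveSketch {
      unsigned NumClauses, CollapsedNum;
      static std::unique_ptr<DirectiveSketch> CreateEmpty(unsigned NC,
                                                          unsigned CN) {
        return std::unique_ptr<DirectiveSketch>(new DirectiveSketch{NC, CN});
      }
    };

    int main() {
      const unsigned NumStmtFields = 2;
      std::vector<uint64_t> Rec = {0, 0, /*NumClauses*/ 2, /*CollapsedNum*/ 1};
      auto S = DirectiveSketch::CreateEmpty(Rec[NumStmtFields],
                                            Rec[NumStmtFields + 1]);
      assert(S->NumClauses == 2 && S->CollapsedNum == 1);
      return 0;
    }
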
case EXPR_CXX_OPERATOR_CALL:
S = new (Context) CXXOperatorCallExpr(Context, Empty);
break;
@@ -3576,7 +3730,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CXX_BIND_TEMPORARY:
S = new (Context) CXXBindTemporaryExpr(Empty);
break;
-
+
case EXPR_CXX_SCALAR_VALUE_INIT:
S = new (Context) CXXScalarValueInitExpr(Empty);
break;
@@ -3589,54 +3743,54 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CXX_PSEUDO_DESTRUCTOR:
S = new (Context) CXXPseudoDestructorExpr(Empty);
break;
-
+
case EXPR_EXPR_WITH_CLEANUPS:
S = ExprWithCleanups::Create(Context, Empty,
Record[ASTStmtReader::NumExprFields]);
break;
-
+
case EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
S = CXXDependentScopeMemberExpr::CreateEmpty(Context,
/*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
+ ? Record[ASTStmtReader::NumExprFields + 1]
: 0);
break;
-
+
case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
S = DependentScopeDeclRefExpr::CreateEmpty(Context,
/*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
+ ? Record[ASTStmtReader::NumExprFields + 1]
: 0);
break;
-
+
case EXPR_CXX_UNRESOLVED_CONSTRUCT:
S = CXXUnresolvedConstructExpr::CreateEmpty(Context,
/*NumArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
-
+
case EXPR_CXX_UNRESOLVED_MEMBER:
S = UnresolvedMemberExpr::CreateEmpty(Context,
/*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
+ ? Record[ASTStmtReader::NumExprFields + 1]
: 0);
break;
-
+
case EXPR_CXX_UNRESOLVED_LOOKUP:
S = UnresolvedLookupExpr::CreateEmpty(Context,
/*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
/*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
- ? Record[ASTStmtReader::NumExprFields + 1]
+ ? Record[ASTStmtReader::NumExprFields + 1]
: 0);
break;
case EXPR_TYPE_TRAIT:
- S = TypeTraitExpr::CreateDeserialized(Context,
+ S = TypeTraitExpr::CreateDeserialized(Context,
Record[ASTStmtReader::NumExprFields]);
break;
-
+
case EXPR_ARRAY_TYPE_TRAIT:
S = new (Context) ArrayTypeTraitExpr(Empty);
break;
@@ -3652,17 +3806,17 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_PACK_EXPANSION:
S = new (Context) PackExpansionExpr(Empty);
break;
-
+
case EXPR_SIZEOF_PACK:
S = SizeOfPackExpr::CreateDeserialized(
Context,
/*NumPartialArgs=*/Record[ASTStmtReader::NumExprFields]);
break;
-
+
case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM:
S = new (Context) SubstNonTypeTemplateParmExpr(Empty);
break;
-
+
case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK:
S = new (Context) SubstNonTypeTemplateParmPackExpr(Empty);
break;
@@ -3671,7 +3825,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
S = FunctionParmPackExpr::CreateEmpty(Context,
Record[ASTStmtReader::NumExprFields]);
break;
-
+
case EXPR_MATERIALIZE_TEMPORARY:
S = new (Context) MaterializeTemporaryExpr(Empty);
break;
@@ -3687,7 +3841,7 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_CUDA_KERNEL_CALL:
S = new (Context) CUDAKernelCallExpr(Context, Empty);
break;
-
+
case EXPR_ASTYPE:
S = new (Context) AsTypeExpr(Empty);
break;
@@ -3701,16 +3855,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case EXPR_ATOMIC:
S = new (Context) AtomicExpr(Empty);
break;
-
+
case EXPR_LAMBDA: {
unsigned NumCaptures = Record[ASTStmtReader::NumExprFields];
- unsigned NumArrayIndexVars = Record[ASTStmtReader::NumExprFields + 1];
- S = LambdaExpr::CreateDeserialized(Context, NumCaptures,
- NumArrayIndexVars);
+ S = LambdaExpr::CreateDeserialized(Context, NumCaptures);
break;
}
}
-
+
// We hit a STMT_STOP, so we're done with this expression.
if (Finished)
break;
@@ -3722,8 +3874,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
StmtEntries[Cursor.GetCurrentBitNo()] = S;
}
-
- assert(Idx == Record.size() && "Invalid deserialization of statement");
+ assert(Record.getIdx() == Record.size() &&
+ "Invalid deserialization of statement");
StmtStack.push_back(S);
}
Done:
diff --git a/lib/Serialization/ASTWriter.cpp b/lib/Serialization/ASTWriter.cpp
index 7589b0c5dd52..6d79ea53b659 100644
--- a/lib/Serialization/ASTWriter.cpp
+++ b/lib/Serialization/ASTWriter.cpp
@@ -12,23 +12,31 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ASTWriter.h"
-#include "clang/Serialization/ModuleFileExtension.h"
#include "ASTCommon.h"
#include "ASTReaderInternals.h"
#include "MultiOnDiskHashTable.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTUnresolvedSet.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
-#include "clang/AST/DeclLookups.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/LambdaCapture.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/RawCommentList.h"
+#include "clang/AST/TemplateName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLocVisitor.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/SourceManagerInternals.h"
#include "clang/Basic/TargetInfo.h"
@@ -38,28 +46,48 @@
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PreprocessingRecord.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Lex/Token.h"
#include "clang/Sema/IdentifierResolver.h"
+#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Sema.h"
+#include "clang/Sema/Weak.h"
#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/Module.h"
+#include "clang/Serialization/ModuleFileExtension.h"
#include "clang/Serialization/SerializationDiagnostic.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Bitcode/BitCodes.h"
#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/EndianStream.h"
-#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <cstdio>
-#include <string.h>
+#include <cassert>
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <deque>
+#include <limits>
+#include <new>
+#include <tuple>
#include <utility>
using namespace clang;
@@ -83,6 +111,7 @@ static StringRef bytes(const SmallVectorImpl<T> &v) {
//===----------------------------------------------------------------------===//
namespace clang {
+
class ASTTypeWriter {
ASTWriter &Writer;
ASTRecordWriter Record;
@@ -127,6 +156,7 @@ namespace clang {
#define ABSTRACT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
};
+
} // end namespace clang
void ASTTypeWriter::VisitBuiltinType(const BuiltinType *T) {
@@ -451,6 +481,14 @@ void ASTTypeWriter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
Code = TYPE_OBJC_INTERFACE;
}
+void ASTTypeWriter::VisitObjCTypeParamType(const ObjCTypeParamType *T) {
+ Record.AddDeclRef(T->getDecl());
+ Record.push_back(T->getNumProtocols());
+ for (const auto *I : T->quals())
+ Record.AddDeclRef(I);
+ Code = TYPE_OBJC_TYPE_PARAM;
+}
+
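
The writer side is the mirror image of the counted loops in the reader: emit a count, then that many references, so the reader can loop exactly that many times. The count-prefixed list convention in isolation (plain integers standing in for decl references):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main() {
      // Writer: count first, then the elements (cf. push_back(NumProtocols)
      // followed by one AddDeclRef per protocol above).
      std::vector<uint64_t> Rec;
      const std::vector<uint64_t> Protos = {11, 22, 33};
      Rec.push_back(Protos.size());
      Rec.insert(Rec.end(), Protos.begin(), Protos.end());

      // Reader: consume the count, then loop exactly that many times.
      unsigned Idx = 0;
      uint64_t N = Rec[Idx++];
      std::vector<uint64_t> Out;
      for (uint64_t I = 0; I != N; ++I)
        Out.push_back(Rec[Idx++]);
      assert(Out == Protos);
      return 0;
    }
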
void ASTTypeWriter::VisitObjCObjectType(const ObjCObjectType *T) {
Record.AddTypeRef(T->getBaseType());
Record.push_back(T->getTypeArgsAsWritten().size());
@@ -478,6 +516,7 @@ ASTTypeWriter::VisitAtomicType(const AtomicType *T) {
void
ASTTypeWriter::VisitPipeType(const PipeType *T) {
Record.AddTypeRef(T->getElementType());
+ Record.push_back(T->isReadOnly());
Code = TYPE_PIPE;
}
@@ -504,6 +543,7 @@ public:
void TypeLocWriter::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
// nothing to do
}
+
void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
Record.AddSourceLocation(TL.getBuiltinLoc());
if (TL.needsExtraLocalData()) {
@@ -513,31 +553,40 @@ void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
Record.push_back(TL.hasModeAttr());
}
}
+
void TypeLocWriter::VisitComplexTypeLoc(ComplexTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitPointerTypeLoc(PointerTypeLoc TL) {
Record.AddSourceLocation(TL.getStarLoc());
}
+
void TypeLocWriter::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
// nothing to do
}
+
void TypeLocWriter::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing to do
}
+
void TypeLocWriter::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
Record.AddSourceLocation(TL.getCaretLoc());
}
+
void TypeLocWriter::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
Record.AddSourceLocation(TL.getAmpLoc());
}
+
void TypeLocWriter::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
Record.AddSourceLocation(TL.getAmpAmpLoc());
}
+
void TypeLocWriter::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
Record.AddSourceLocation(TL.getStarLoc());
Record.AddTypeSourceInfo(TL.getClassTInfo());
}
+
void TypeLocWriter::VisitArrayTypeLoc(ArrayTypeLoc TL) {
Record.AddSourceLocation(TL.getLBracketLoc());
Record.AddSourceLocation(TL.getRBracketLoc());
@@ -545,29 +594,37 @@ void TypeLocWriter::VisitArrayTypeLoc(ArrayTypeLoc TL) {
if (TL.getSizeExpr())
Record.AddStmt(TL.getSizeExpr());
}
+
void TypeLocWriter::VisitConstantArrayTypeLoc(ConstantArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocWriter::VisitIncompleteArrayTypeLoc(IncompleteArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocWriter::VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocWriter::VisitDependentSizedArrayTypeLoc(
DependentSizedArrayTypeLoc TL) {
VisitArrayTypeLoc(TL);
}
+
void TypeLocWriter::VisitDependentSizedExtVectorTypeLoc(
DependentSizedExtVectorTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitVectorTypeLoc(VectorTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
Record.AddSourceLocation(TL.getLocalRangeBegin());
Record.AddSourceLocation(TL.getLParenLoc());
@@ -588,35 +645,50 @@ void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+void TypeLocWriter::VisitObjCTypeParamTypeLoc(ObjCTypeParamTypeLoc TL) {
+ if (TL.getNumProtocols()) {
+ Record.AddSourceLocation(TL.getProtocolLAngleLoc());
+ Record.AddSourceLocation(TL.getProtocolRAngleLoc());
+ }
+ for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
+ Record.AddSourceLocation(TL.getProtocolLoc(i));
+}
void TypeLocWriter::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
Record.AddSourceLocation(TL.getTypeofLoc());
Record.AddSourceLocation(TL.getLParenLoc());
Record.AddSourceLocation(TL.getRParenLoc());
}
+
void TypeLocWriter::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
Record.AddSourceLocation(TL.getTypeofLoc());
Record.AddSourceLocation(TL.getLParenLoc());
Record.AddSourceLocation(TL.getRParenLoc());
Record.AddTypeSourceInfo(TL.getUnderlyingTInfo());
}
+
void TypeLocWriter::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
Record.AddSourceLocation(TL.getKWLoc());
Record.AddSourceLocation(TL.getLParenLoc());
Record.AddSourceLocation(TL.getRParenLoc());
Record.AddTypeSourceInfo(TL.getUnderlyingTInfo());
}
+
void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Record.AddSourceLocation(TL.getAttrNameLoc());
if (TL.hasAttrOperand()) {
@@ -632,17 +704,21 @@ void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Record.AddSourceLocation(TL.getAttrEnumOperandLoc());
}
}
+
void TypeLocWriter::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitSubstTemplateTypeParmTypeLoc(
SubstTemplateTypeParmTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
SubstTemplateTypeParmPackTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
Record.AddSourceLocation(TL.getTemplateKeywordLoc());
@@ -653,22 +729,27 @@ void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
Record.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
TL.getArgLoc(i).getLocInfo());
}
+
void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
Record.AddSourceLocation(TL.getLParenLoc());
Record.AddSourceLocation(TL.getRParenLoc());
}
+
void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
}
+
void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
@@ -681,12 +762,15 @@ void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
Record.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
TL.getArgLoc(I).getLocInfo());
}
+
void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
Record.AddSourceLocation(TL.getEllipsisLoc());
}
+
void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
Record.AddSourceLocation(TL.getNameLoc());
}
+
void TypeLocWriter::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
Record.push_back(TL.hasBaseTypeAsWritten());
Record.AddSourceLocation(TL.getTypeArgsLAngleLoc());
@@ -698,14 +782,17 @@ void TypeLocWriter::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
Record.AddSourceLocation(TL.getProtocolLoc(i));
}
+
void TypeLocWriter::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
Record.AddSourceLocation(TL.getStarLoc());
}
+
void TypeLocWriter::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
Record.AddSourceLocation(TL.getKWLoc());
Record.AddSourceLocation(TL.getLParenLoc());
Record.AddSourceLocation(TL.getRParenLoc());
}
+
void TypeLocWriter::VisitPipeTypeLoc(PipeTypeLoc TL) {
Record.AddSourceLocation(TL.getKWLoc());
}
@@ -905,7 +992,7 @@ static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
void ASTWriter::WriteBlockInfoBlock() {
RecordData Record;
- Stream.EnterSubblock(llvm::bitc::BLOCKINFO_BLOCK_ID, 3);
+ Stream.EnterBlockInfoBlock();
#define BLOCK(X) EmitBlockID(X ## _ID, #X, Stream, Record)
#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
@@ -966,6 +1053,8 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(HEADER_SEARCH_TABLE);
RECORD(FP_PRAGMA_OPTIONS);
RECORD(OPENCL_EXTENSIONS);
+ RECORD(OPENCL_EXTENSION_TYPES);
+ RECORD(OPENCL_EXTENSION_DECLS);
RECORD(DELEGATING_CTORS);
RECORD(KNOWN_NAMESPACES);
RECORD(MODULE_OFFSET_MAP);
@@ -983,6 +1072,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(POINTERS_TO_MEMBERS_PRAGMA_OPTIONS);
RECORD(UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES);
RECORD(DELETE_EXPRS_TO_ANALYZE);
+ RECORD(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -1018,6 +1108,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(SUBMODULE_PRIVATE_HEADER);
RECORD(SUBMODULE_TEXTUAL_HEADER);
RECORD(SUBMODULE_PRIVATE_TEXTUAL_HEADER);
+ RECORD(SUBMODULE_INITIALIZERS);
// Comments Block.
BLOCK(COMMENTS_BLOCK);
@@ -1066,6 +1157,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(TYPE_ATOMIC);
RECORD(TYPE_DECAYED);
RECORD(TYPE_ADJUSTED);
+ RECORD(TYPE_OBJC_TYPE_PARAM);
RECORD(LOCAL_REDECLARATIONS);
RECORD(DECL_TYPEDEF);
RECORD(DECL_TYPEALIAS);
@@ -1212,7 +1304,7 @@ adjustFilenameForRelocatableAST(const char *Filename, StringRef BaseDir) {
}
static ASTFileSignature getSignature() {
- while (1) {
+ while (true) {
if (ASTFileSignature S = llvm::sys::Process::GetRandomNumber())
return S;
// Rely on GetRandomNumber to eventually return non-zero...
@@ -1534,6 +1626,7 @@ uint64_t ASTWriter::WriteControlBlock(Preprocessor &PP,
}
namespace {
+
/// \brief An input file.
struct InputFileEntry {
const FileEntry *File;
@@ -1541,6 +1634,7 @@ namespace {
bool IsTransient;
bool BufferOverridden;
};
+
} // end anonymous namespace
void ASTWriter::WriteInputFiles(SourceManager &SourceMgr,
@@ -1701,6 +1795,7 @@ static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
}
namespace {
+
// Trait used for the on-disk hash table of header search information.
class HeaderFileInfoTrait {
ASTWriter &Writer;
@@ -1716,7 +1811,7 @@ namespace {
struct key_type {
const FileEntry *FE;
- const char *Filename;
+ StringRef Filename;
};
typedef const key_type &key_type_ref;
@@ -1737,7 +1832,7 @@ namespace {
EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
using namespace llvm::support;
endian::Writer<little> LE(Out);
- unsigned KeyLen = strlen(key.Filename) + 1 + 8 + 8;
+ unsigned KeyLen = key.Filename.size() + 1 + 8 + 8;
LE.write<uint16_t>(KeyLen);
unsigned DataLen = 1 + 2 + 4 + 4;
for (auto ModInfo : HS.getModuleMap().findAllModulesForHeader(key.FE))
@@ -1754,7 +1849,7 @@ namespace {
KeyLen -= 8;
LE.write<uint64_t>(Writer.getTimestampForOutput(key.FE));
KeyLen -= 8;
- Out.write(key.Filename, KeyLen);
+ Out.write(key.Filename.data(), KeyLen);
}
void EmitData(raw_ostream &Out, key_type_ref key,
@@ -1809,6 +1904,7 @@ namespace {
const char *strings_begin() const { return FrameworkStringData.begin(); }
const char *strings_end() const { return FrameworkStringData.end(); }
};
+
} // end anonymous namespace
/// \brief Write the header search block for the list of files that
@@ -1842,13 +1938,13 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
continue;
// Massage the file path into an appropriate form.
- const char *Filename = File->getName();
+ StringRef Filename = File->getName();
SmallString<128> FilenameTmp(Filename);
if (PreparePathForOutput(FilenameTmp)) {
// If we performed any translation on the file name at all, we need to
// save this string, since the generator will refer to it later.
- Filename = strdup(FilenameTmp.c_str());
- SavedStrings.push_back(Filename);
+ Filename = StringRef(strdup(FilenameTmp.c_str()));
+ SavedStrings.push_back(Filename.data());
}
HeaderFileInfoTrait::key_type key = { File, Filename };
@@ -1982,14 +2078,13 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// the reader side).
const llvm::MemoryBuffer *Buffer
= Content->getBuffer(PP.getDiagnostics(), PP.getSourceManager());
- const char *Name = Buffer->getBufferIdentifier();
+ StringRef Name = Buffer->getBufferIdentifier();
Stream.EmitRecordWithBlob(SLocBufferAbbrv, Record,
- StringRef(Name, strlen(Name) + 1));
+ StringRef(Name.data(), Name.size() + 1));
EmitBlob = true;
- if (strcmp(Name, "<built-in>") == 0) {
+ if (Name == "<built-in>")
PreloadSLocs.push_back(SLocEntryOffsets.size());
- }
}
if (EmitBlob) {
@@ -2418,7 +2513,9 @@ unsigned ASTWriter::getLocalOrImportedSubmoduleID(Module *Mod) {
if (Known != SubmoduleIDs.end())
return Known->second;
- if (Mod->getTopLevelModule() != WritingModule)
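+  // When building a PCH for a module's implementation, also assign IDs to
+  // submodules of the module being implemented, since its description may be
+  // written out as well (see WriteASTCore).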
+ auto *Top = Mod->getTopLevelModule();
+ if (Top != WritingModule &&
+ !Top->fullModuleNameIs(StringRef(getLangOpts().CurrentModule)))
return 0;
return SubmoduleIDs[Mod] = NextSubmoduleID++;
@@ -2650,6 +2747,13 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Stream.EmitRecordWithBlob(ConfigMacroAbbrev, Record, CM);
}
+ // Emit the initializers, if any.
+ RecordData Inits;
+ for (Decl *D : Context->getModuleInitializers(Mod))
+ Inits.push_back(GetDeclRef(D));
+ if (!Inits.empty())
+ Stream.EmitRecord(SUBMODULE_INITIALIZERS, Inits);
+
// Queue up the submodules of this module.
for (auto *M : Mod->submodules())
Q.push(M);
@@ -2860,6 +2964,7 @@ void ASTWriter::WriteComments() {
//===----------------------------------------------------------------------===//
namespace {
+
// Trait used for the on-disk hash table used in the method pool.
class ASTMethodPoolTrait {
ASTWriter &Writer;
@@ -2964,6 +3069,7 @@ public:
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
};
+
} // end anonymous namespace
/// \brief Write ObjC data: selectors and the method pool.
@@ -3135,6 +3241,7 @@ static NamedDecl *getDeclForLocalLookup(const LangOptions &LangOpts,
}
namespace {
+
class ASTIdentifierTableTrait {
ASTWriter &Writer;
Preprocessor &PP;
@@ -3185,6 +3292,7 @@ public:
auto MacroOffset = Writer.getMacroDirectivesOffset(II);
return isInterestingIdentifier(II, MacroOffset);
}
+
bool isInterestingNonMacroIdentifier(const IdentifierInfo *II) {
return isInterestingIdentifier(II, 0);
}
@@ -3278,6 +3386,7 @@ public:
}
}
};
+
} // end anonymous namespace
/// \brief Write the identifier table into the AST file.
@@ -3384,6 +3493,7 @@ void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
//===----------------------------------------------------------------------===//
namespace {
+
// Trait used for the on-disk hash table used for the DeclContext lookup tables.
class ASTDeclContextNameLookupTrait {
ASTWriter &Writer;
@@ -3509,6 +3619,7 @@ public:
assert(Out.tell() - Start == DataLen && "Data length is wrong");
}
};
+
} // end anonymous namespace
bool ASTWriter::isLookupResultExternal(StoredDeclsList &Result,
@@ -3830,11 +3941,53 @@ void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
const OpenCLOptions &Opts = SemaRef.getOpenCLOptions();
RecordData Record;
-#define OPENCLEXT(nm) Record.push_back(Opts.nm);
-#include "clang/Basic/OpenCLExtensions.def"
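+  // Serialize each extension as: name, Supported flag, Enabled flag, and the
+  // Avail and Core versions.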
+  for (const auto &I : Opts.OptMap) {
+ AddString(I.getKey(), Record);
+ auto V = I.getValue();
+ Record.push_back(V.Supported ? 1 : 0);
+ Record.push_back(V.Enabled ? 1 : 0);
+ Record.push_back(V.Avail);
+ Record.push_back(V.Core);
+ }
Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
}
+void ASTWriter::WriteOpenCLExtensionTypes(Sema &SemaRef) {
+ if (!SemaRef.Context.getLangOpts().OpenCL)
+ return;
+
+ RecordData Record;
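+  // Each entry: the type ID, the number of required extensions, then the
+  // extension names.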
+ for (const auto &I : SemaRef.OpenCLTypeExtMap) {
+ Record.push_back(
+ static_cast<unsigned>(getTypeID(I.first->getCanonicalTypeInternal())));
+ Record.push_back(I.second.size());
+ for (auto Ext : I.second)
+ AddString(Ext, Record);
+ }
+ Stream.EmitRecord(OPENCL_EXTENSION_TYPES, Record);
+}
+
+void ASTWriter::WriteOpenCLExtensionDecls(Sema &SemaRef) {
+ if (!SemaRef.Context.getLangOpts().OpenCL)
+ return;
+
+ RecordData Record;
+ for (const auto &I : SemaRef.OpenCLDeclExtMap) {
+ Record.push_back(getDeclID(I.first));
+ Record.push_back(static_cast<unsigned>(I.second.size()));
+ for (auto Ext : I.second)
+ AddString(Ext, Record);
+ }
+ Stream.EmitRecord(OPENCL_EXTENSION_DECLS, Record);
+}
+
+void ASTWriter::WriteCUDAPragmas(Sema &SemaRef) {
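+  // Record how many '#pragma clang force_cuda_host_device begin' regions are
+  // currently open, so that a consumer of this file can restore that state.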
+ if (SemaRef.ForceCUDAHostDeviceDepth > 0) {
+ RecordData::value_type Record[] = {SemaRef.ForceCUDAHostDeviceDepth};
+ Stream.EmitRecord(CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH, Record);
+ }
+}
+
void ASTWriter::WriteObjCCategories() {
SmallVector<ObjCCategoriesInfo, 2> CategoriesMap;
RecordData Categories;
@@ -3894,14 +4047,14 @@ void ASTWriter::WriteLateParsedTemplates(Sema &SemaRef) {
return;
RecordData Record;
- for (auto LPTMapEntry : LPTMap) {
+ for (auto &LPTMapEntry : LPTMap) {
const FunctionDecl *FD = LPTMapEntry.first;
- LateParsedTemplate *LPT = LPTMapEntry.second;
+ LateParsedTemplate &LPT = *LPTMapEntry.second;
AddDeclRef(FD, Record);
- AddDeclRef(LPT->D, Record);
- Record.push_back(LPT->Toks.size());
+ AddDeclRef(LPT.D, Record);
+ Record.push_back(LPT.Toks.size());
- for (const auto &Tok : LPT->Toks) {
+ for (const auto &Tok : LPT.Toks) {
AddToken(Tok, Record);
}
}
@@ -4255,9 +4408,10 @@ uint64_t ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// Build a record containing some declaration references.
RecordData SemaDeclRefs;
- if (SemaRef.StdNamespace || SemaRef.StdBadAlloc) {
+ if (SemaRef.StdNamespace || SemaRef.StdBadAlloc || SemaRef.StdAlignValT) {
AddDeclRef(SemaRef.getStdNamespace(), SemaDeclRefs);
AddDeclRef(SemaRef.getStdBadAlloc(), SemaDeclRefs);
+ AddDeclRef(SemaRef.getStdAlignValT(), SemaDeclRefs);
}
RecordData CUDASpecialDeclRefs;
@@ -4364,8 +4518,9 @@ uint64_t ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
Number.second));
// Make sure visible decls, added to DeclContexts previously loaded from
- // an AST file, are registered for serialization.
- for (const auto *I : UpdatingVisibleDecls) {
+ // an AST file, are registered for serialization. Likewise for template
+ // specializations added to imported templates.
+ for (const auto *I : DeclsToEmitEvenIfUnreferenced) {
GetDeclRef(I);
}
@@ -4510,11 +4665,25 @@ uint64_t ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
WriteFPPragmaOptions(SemaRef.getFPOptions());
WriteOpenCLExtensions(SemaRef);
+ WriteOpenCLExtensionTypes(SemaRef);
+ WriteOpenCLExtensionDecls(SemaRef);
+ WriteCUDAPragmas(SemaRef);
WritePragmaDiagnosticMappings(Context.getDiagnostics(), isModule);
// If we're emitting a module, write out the submodule information.
if (WritingModule)
WriteSubmodules(WritingModule);
+ else if (!getLangOpts().CurrentModule.empty()) {
+ // If we're building a PCH in the implementation of a module, we may need
+ // the description of the current module.
+ //
+ // FIXME: We may need other modules that we did not load from an AST file,
+ // such as if a module declares a 'conflicts' on a different module.
+ Module *M = PP.getHeaderSearchInfo().getModuleMap().findModule(
+ getLangOpts().CurrentModule);
+ if (M && !M->IsFromModuleFile)
+ WriteSubmodules(M);
+ }
Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
@@ -4682,6 +4851,11 @@ void ASTWriter::WriteDeclUpdatesBlocks(RecordDataImpl &OffsetsRecord) {
cast<ParmVarDecl>(Update.getDecl())->getDefaultArg()));
break;
+ case UPD_CXX_INSTANTIATED_DEFAULT_MEMBER_INITIALIZER:
+ Record.AddStmt(
+ cast<FieldDecl>(Update.getDecl())->getInClassInitializer());
+ break;
+
case UPD_CXX_INSTANTIATED_CLASS_DEFINITION: {
auto *RD = cast<CXXRecordDecl>(D);
UpdatedDeclContexts.insert(RD->getPrimaryContext());
@@ -5159,7 +5333,7 @@ void ASTRecordWriter::AddDeclarationNameInfo(
void ASTRecordWriter::AddQualifierInfo(const QualifierInfo &Info) {
AddNestedNameSpecifierLoc(Info.QualifierLoc);
Record->push_back(Info.NumTemplParamLists);
- for (unsigned i=0, e=Info.NumTemplParamLists; i != e; ++i)
+ for (unsigned i = 0, e = Info.NumTemplParamLists; i != e; ++i)
AddTemplateParameterList(Info.TemplParamLists[i]);
}
@@ -5361,6 +5535,7 @@ void ASTRecordWriter::AddTemplateParameterList(
AddSourceLocation(TemplateParams->getTemplateLoc());
AddSourceLocation(TemplateParams->getLAngleLoc());
AddSourceLocation(TemplateParams->getRAngleLoc());
+ // TODO: Concepts
Record->push_back(TemplateParams->size());
for (const auto &P : *TemplateParams)
AddDeclRef(P);
@@ -5371,7 +5546,7 @@ void ASTRecordWriter::AddTemplateArgumentList(
const TemplateArgumentList *TemplateArgs) {
assert(TemplateArgs && "No TemplateArgs!");
Record->push_back(TemplateArgs->size());
- for (int i=0, e = TemplateArgs->size(); i != e; ++i)
+ for (int i = 0, e = TemplateArgs->size(); i != e; ++i)
AddTemplateArgument(TemplateArgs->get(i));
}
@@ -5382,7 +5557,7 @@ void ASTRecordWriter::AddASTTemplateArgumentListInfo(
AddSourceLocation(ASTTemplArgList->RAngleLoc);
Record->push_back(ASTTemplArgList->NumTemplateArgs);
const TemplateArgumentLoc *TemplArgs = ASTTemplArgList->getTemplateArgs();
- for (int i=0, e = ASTTemplArgList->NumTemplateArgs; i != e; ++i)
+ for (int i = 0, e = ASTTemplArgList->NumTemplateArgs; i != e; ++i)
AddTemplateArgumentLoc(TemplArgs[i]);
}
@@ -5452,13 +5627,8 @@ EmitCXXCtorInitializers(ASTWriter &W,
Writer.AddSourceLocation(Init->getLParenLoc());
Writer.AddSourceLocation(Init->getRParenLoc());
Writer.push_back(Init->isWritten());
- if (Init->isWritten()) {
+ if (Init->isWritten())
Writer.push_back(Init->getSourceOrder());
- } else {
- Writer.push_back(Init->getNumArrayIndices());
- for (auto *VD : Init->getArrayIndices())
- Writer.AddDeclRef(VD);
- }
}
return Writer.Emit(serialization::DECL_CXX_CTOR_INITIALIZERS);
@@ -5539,7 +5709,7 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
Record->push_back(Lambda.NumCaptures);
Record->push_back(Lambda.NumExplicitCaptures);
Record->push_back(Lambda.ManglingNumber);
- AddDeclRef(Lambda.ContextDecl);
+ AddDeclRef(D->getLambdaContextDecl());
AddTypeSourceInfo(Lambda.MethodTyInfo);
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
const LambdaCapture &Capture = Lambda.Captures[I];
@@ -5693,9 +5863,9 @@ void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
// that we write out all of its lookup results so we don't get a nasty
// surprise when we try to emit its lookup table.
for (auto *Child : DC->decls())
- UpdatingVisibleDecls.push_back(Child);
+ DeclsToEmitEvenIfUnreferenced.push_back(Child);
}
- UpdatingVisibleDecls.push_back(D);
+ DeclsToEmitEvenIfUnreferenced.push_back(D);
}
void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
@@ -5794,6 +5964,15 @@ void ASTWriter::DefaultArgumentInstantiated(const ParmVarDecl *D) {
DeclUpdate(UPD_CXX_INSTANTIATED_DEFAULT_ARGUMENT, D));
}
+void ASTWriter::DefaultMemberInitializerInstantiated(const FieldDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ DeclUpdates[D].push_back(
+ DeclUpdate(UPD_CXX_INSTANTIATED_DEFAULT_MEMBER_INITIALIZER, D));
+}
+
void ASTWriter::AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
const ObjCInterfaceDecl *IFD) {
if (Chain && Chain->isProcessingUpdateRecords()) return;
@@ -5855,3 +6034,39 @@ void ASTWriter::AddedAttributeToRecord(const Attr *Attr,
return;
DeclUpdates[Record].push_back(DeclUpdate(UPD_ADDED_ATTR_TO_RECORD, Attr));
}
+
+void ASTWriter::AddedCXXTemplateSpecialization(
+ const ClassTemplateDecl *TD, const ClassTemplateSpecializationDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+
+ if (!TD->getFirstDecl()->isFromASTFile())
+ return;
+ if (Chain && Chain->isProcessingUpdateRecords())
+ return;
+
+ DeclsToEmitEvenIfUnreferenced.push_back(D);
+}
+
+void ASTWriter::AddedCXXTemplateSpecialization(
+ const VarTemplateDecl *TD, const VarTemplateSpecializationDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+
+ if (!TD->getFirstDecl()->isFromASTFile())
+ return;
+ if (Chain && Chain->isProcessingUpdateRecords())
+ return;
+
+ DeclsToEmitEvenIfUnreferenced.push_back(D);
+}
+
+void ASTWriter::AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
+ const FunctionDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+
+ if (!TD->getFirstDecl()->isFromASTFile())
+ return;
+ if (Chain && Chain->isProcessingUpdateRecords())
+ return;
+
+ DeclsToEmitEvenIfUnreferenced.push_back(D);
+}
diff --git a/lib/Serialization/ASTWriterDecl.cpp b/lib/Serialization/ASTWriterDecl.cpp
index 23d18540e822..ee220f00a81f 100644
--- a/lib/Serialization/ASTWriterDecl.cpp
+++ b/lib/Serialization/ASTWriterDecl.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Serialization/ASTWriter.h"
#include "ASTCommon.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclContextInternals.h"
@@ -20,7 +19,7 @@
#include "clang/AST/Expr.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Serialization/ASTReader.h"
-#include "llvm/ADT/Twine.h"
+#include "clang/Serialization/ASTWriter.h"
#include "llvm/Bitcode/BitstreamWriter.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -97,6 +96,8 @@ namespace clang {
void VisitVarDecl(VarDecl *D);
void VisitImplicitParamDecl(ImplicitParamDecl *D);
void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitDecompositionDecl(DecompositionDecl *D);
+ void VisitBindingDecl(BindingDecl *D);
void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
void VisitTemplateDecl(TemplateDecl *D);
void VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
@@ -106,9 +107,11 @@ namespace clang {
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingPackDecl(UsingPackDecl *D);
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitConstructorUsingShadowDecl(ConstructorUsingShadowDecl *D);
void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitExportDecl(ExportDecl *D);
void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
void VisitImportDecl(ImportDecl *D);
void VisitAccessSpecDecl(AccessSpecDecl *D);
@@ -892,6 +895,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
Record.push_back(D->getTSCSpec());
Record.push_back(D->getInitStyle());
if (!isa<ParmVarDecl>(D)) {
+ Record.push_back(D->isThisDeclarationADemotedDefinition());
Record.push_back(D->isExceptionVariable());
Record.push_back(D->isNRVOVariable());
Record.push_back(D->isCXXForRangeDecl());
@@ -940,10 +944,7 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
D->getDeclName().getNameKind() == DeclarationName::Identifier &&
!D->hasExtInfo() &&
D->getFirstDecl() == D->getMostRecentDecl() &&
- D->getInitStyle() == VarDecl::CInit &&
- D->getInit() == nullptr &&
- !isa<ParmVarDecl>(D) &&
- !isa<VarTemplateSpecializationDecl>(D) &&
+ D->getKind() == Decl::Var &&
!D->isInline() &&
!D->isConstexpr() &&
!D->isInitCapture() &&
@@ -999,6 +1000,8 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
// Check things we know are true of *every* PARM_VAR_DECL, which is more than
// just us assuming it.
assert(!D->getTSCSpec() && "PARM_VAR_DECL can't use TLS");
+  assert(!D->isThisDeclarationADemotedDefinition() &&
+         "PARM_VAR_DECL can't be a demoted definition.");
assert(D->getAccess() == AS_none && "PARM_VAR_DECL can't be public/private");
assert(!D->isExceptionVariable() && "PARM_VAR_DECL can't be exception var");
assert(D->getPreviousDecl() == nullptr && "PARM_VAR_DECL can't be redecl");
@@ -1006,6 +1009,22 @@ void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
"PARM_VAR_DECL can't be static data member");
}
+void ASTDeclWriter::VisitDecompositionDecl(DecompositionDecl *D) {
+ // Record the number of bindings first to simplify deserialization.
+ Record.push_back(D->bindings().size());
+
+ VisitVarDecl(D);
+ for (auto *B : D->bindings())
+ Record.AddDeclRef(B);
+ Code = serialization::DECL_DECOMPOSITION;
+}
+
+void ASTDeclWriter::VisitBindingDecl(BindingDecl *D) {
+ VisitValueDecl(D);
+ Record.AddStmt(D->getBinding());
+ Code = serialization::DECL_BINDING;
+}
+
void ASTDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
VisitDecl(D);
Record.AddStmt(D->getAsmString());
@@ -1064,6 +1083,12 @@ void ASTDeclWriter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
Code = serialization::DECL_LINKAGE_SPEC;
}
+void ASTDeclWriter::VisitExportDecl(ExportDecl *D) {
+ VisitDecl(D);
+ Record.AddSourceLocation(D->getRBraceLoc());
+ Code = serialization::DECL_EXPORT;
+}
+
void ASTDeclWriter::VisitLabelDecl(LabelDecl *D) {
VisitNamedDecl(D);
Record.AddSourceLocation(D->getLocStart());
@@ -1118,6 +1143,15 @@ void ASTDeclWriter::VisitUsingDecl(UsingDecl *D) {
Code = serialization::DECL_USING;
}
+void ASTDeclWriter::VisitUsingPackDecl(UsingPackDecl *D) {
+ Record.push_back(D->NumExpansions);
+ VisitNamedDecl(D);
+ Record.AddDeclRef(D->getInstantiatedFromUsingDecl());
+ for (auto *E : D->expansions())
+ Record.AddDeclRef(E);
+ Code = serialization::DECL_USING_PACK;
+}
+
void ASTDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) {
VisitRedeclarable(D);
VisitNamedDecl(D);
@@ -1151,6 +1185,7 @@ void ASTDeclWriter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
Record.AddSourceLocation(D->getUsingLoc());
Record.AddNestedNameSpecifierLoc(D->getQualifierLoc());
Record.AddDeclarationNameLoc(D->DNLoc, D->getDeclName());
+ Record.AddSourceLocation(D->getEllipsisLoc());
Code = serialization::DECL_UNRESOLVED_USING_VALUE;
}
@@ -1159,6 +1194,7 @@ void ASTDeclWriter::VisitUnresolvedUsingTypenameDecl(
VisitTypeDecl(D);
Record.AddSourceLocation(D->getTypenameLoc());
Record.AddNestedNameSpecifierLoc(D->getQualifierLoc());
+ Record.AddSourceLocation(D->getEllipsisLoc());
Code = serialization::DECL_UNRESOLVED_USING_TYPENAME;
}
@@ -1857,9 +1893,9 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
// VarDecl
- Abv->Add(BitCodeAbbrevOp(0)); // StorageClass
- Abv->Add(BitCodeAbbrevOp(0)); // getTSCSpec
- Abv->Add(BitCodeAbbrevOp(0)); // hasCXXDirectInitializer
+ Abv->Add(BitCodeAbbrevOp(0)); // SClass
+ Abv->Add(BitCodeAbbrevOp(0)); // TSCSpec
+ Abv->Add(BitCodeAbbrevOp(0)); // InitStyle
Abv->Add(BitCodeAbbrevOp(0)); // Linkage
Abv->Add(BitCodeAbbrevOp(0)); // HasInit
Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
@@ -1933,9 +1969,10 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
// VarDecl
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // StorageClass
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // getTSCSpec
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CXXDirectInitializer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // SClass
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // TSCSpec
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // InitStyle
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsThisDeclarationADemotedDefinition
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isExceptionVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
@@ -1946,8 +1983,8 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isInitCapture
Abv->Add(BitCodeAbbrevOp(0)); // isPrevDeclInSameScope
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Linkage
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasInit
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasMemberSpecInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // IsInitICE (local)
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // VarKind (local enum)
// Type Source Info
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
@@ -2123,11 +2160,11 @@ static bool isRequiredDecl(const Decl *D, ASTContext &Context,
D->hasAttr<OMPDeclareTargetDeclAttr>())
return true;
- // ImportDecl is used by codegen to determine the set of imported modules to
- // search for inputs for automatic linking; include it if it has a semantic
- // effect.
- if (isa<ImportDecl>(D) && !WritingModule)
- return true;
+ if (WritingModule && (isa<VarDecl>(D) || isa<ImportDecl>(D))) {
+ // These declarations are part of the module initializer, and are emitted
+ // if and when the module is imported, rather than being emitted eagerly.
+ return false;
+ }
return Context.DeclMustBeEmitted(D);
}
diff --git a/lib/Serialization/ASTWriterStmt.cpp b/lib/Serialization/ASTWriterStmt.cpp
index 84e718e9ef23..3993be146edf 100644
--- a/lib/Serialization/ASTWriterStmt.cpp
+++ b/lib/Serialization/ASTWriterStmt.cpp
@@ -792,6 +792,18 @@ void ASTStmtWriter::VisitNoInitExpr(NoInitExpr *E) {
Code = serialization::EXPR_NO_INIT;
}
+void ASTStmtWriter::VisitArrayInitLoopExpr(ArrayInitLoopExpr *E) {
+ VisitExpr(E);
+ Record.AddStmt(E->SubExprs[0]);
+ Record.AddStmt(E->SubExprs[1]);
+ Code = serialization::EXPR_ARRAY_INIT_LOOP;
+}
+
+void ASTStmtWriter::VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ VisitExpr(E);
+ Code = serialization::EXPR_ARRAY_INIT_INDEX;
+}
+
void ASTStmtWriter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
VisitExpr(E);
Code = serialization::EXPR_IMPLICIT_VALUE_INIT;
@@ -1245,10 +1257,6 @@ void ASTStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
VisitExpr(E);
Record.push_back(E->NumCaptures);
- unsigned NumArrayIndexVars = 0;
- if (E->HasArrayIndexVars)
- NumArrayIndexVars = E->getArrayIndexStarts()[E->NumCaptures];
- Record.push_back(NumArrayIndexVars);
Record.AddSourceRange(E->IntroducerRange);
Record.push_back(E->CaptureDefault); // FIXME: stable encoding
Record.AddSourceLocation(E->CaptureDefaultLoc);
@@ -1263,15 +1271,6 @@ void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
Record.AddStmt(*C);
}
- // Add array index variables, if any.
- if (NumArrayIndexVars) {
- Record.append(E->getArrayIndexStarts(),
- E->getArrayIndexStarts() + E->NumCaptures + 1);
- VarDecl **ArrayIndexVars = E->getArrayIndexVars();
- for (unsigned I = 0; I != NumArrayIndexVars; ++I)
- Record.AddDeclRef(ArrayIndexVars[I]);
- }
-
Code = serialization::EXPR_LAMBDA;
}
@@ -1392,6 +1391,7 @@ void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
VisitExpr(E);
Record.push_back(E->isGlobalNew());
Record.push_back(E->isArray());
+ Record.push_back(E->passAlignment());
Record.push_back(E->doesUsualArrayDeleteWantSize());
Record.push_back(E->getNumPlacementArgs());
Record.push_back(E->StoredInitializationStyle);
@@ -1576,6 +1576,7 @@ void ASTStmtWriter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
Record.push_back(E->getValue());
Record.AddSourceRange(E->getSourceRange());
Record.AddTypeSourceInfo(E->getQueriedTypeSourceInfo());
+ Record.AddStmt(E->getDimensionExpression());
Code = serialization::EXPR_ARRAY_TYPE_TRAIT;
}
@@ -2151,17 +2152,45 @@ void OMPClauseWriter::VisitOMPFromClause(OMPFromClause *C) {
void OMPClauseWriter::VisitOMPUseDevicePtrClause(OMPUseDevicePtrClause *C) {
Record.push_back(C->varlist_size());
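+  // Counts for the mappable-expression components, mirroring the layout used
+  // for the map clause.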
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists()) {
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *VE : C->private_copies())
+ Record.AddStmt(VE);
+ for (auto *VE : C->inits())
Record.AddStmt(VE);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
}
}
void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
Record.AddSourceLocation(C->getLParenLoc());
- for (auto *VE : C->varlists()) {
- Record.AddStmt(VE);
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
}
}
@@ -2479,6 +2508,54 @@ void ASTStmtWriter::VisitOMPTargetParallelForSimdDirective(
Code = serialization::STMT_OMP_TARGET_PARALLEL_FOR_SIMD_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPTargetSimdDirective(OMPTargetSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TARGET_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTeamsDistributeDirective(
+ OMPTeamsDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_DISTRIBUTE_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTeamsDistributeSimdDirective(
+ OMPTeamsDistributeSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTeamsDistributeParallelForSimdDirective(
+ OMPTeamsDistributeParallelForSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTeamsDistributeParallelForDirective(
+ OMPTeamsDistributeParallelForDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetTeamsDirective(OMPTargetTeamsDirective *D) {
+ VisitStmt(D);
+ Record.push_back(D->getNumClauses());
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_TARGET_TEAMS_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetTeamsDistributeDirective(
+ OMPTargetTeamsDistributeDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TARGET_TEAMS_DISTRIBUTE_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetTeamsDistributeParallelForDirective(
+ OMPTargetTeamsDistributeParallelForDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/Serialization/GeneratePCH.cpp b/lib/Serialization/GeneratePCH.cpp
index 308fde8b1dd7..e1765dafd96f 100644
--- a/lib/Serialization/GeneratePCH.cpp
+++ b/lib/Serialization/GeneratePCH.cpp
@@ -12,24 +12,21 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Serialization/ASTWriter.h"
-#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
-#include "clang/Basic/FileManager.h"
+#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/SemaConsumer.h"
+#include "clang/Serialization/ASTWriter.h"
#include "llvm/Bitcode/BitstreamWriter.h"
-#include <string>
using namespace clang;
PCHGenerator::PCHGenerator(
- const Preprocessor &PP, StringRef OutputFile,
- clang::Module *Module, StringRef isysroot,
- std::shared_ptr<PCHBuffer> Buffer,
- ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
- bool AllowASTWithErrors, bool IncludeTimestamps)
- : PP(PP), OutputFile(OutputFile), Module(Module), isysroot(isysroot.str()),
+ const Preprocessor &PP, StringRef OutputFile, StringRef isysroot,
+ std::shared_ptr<PCHBuffer> Buffer,
+ ArrayRef<llvm::IntrusiveRefCntPtr<ModuleFileExtension>> Extensions,
+ bool AllowASTWithErrors, bool IncludeTimestamps)
+ : PP(PP), OutputFile(OutputFile), isysroot(isysroot.str()),
SemaPtr(nullptr), Buffer(Buffer), Stream(Buffer->Data),
Writer(Stream, Extensions, IncludeTimestamps),
AllowASTWithErrors(AllowASTWithErrors) {
@@ -48,6 +45,16 @@ void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
if (hasErrors && !AllowASTWithErrors)
return;
+ Module *Module = nullptr;
+ if (PP.getLangOpts().isCompilingModule()) {
+ Module = PP.getHeaderSearchInfo().lookupModule(
+ PP.getLangOpts().CurrentModule, /*AllowSearch*/ false);
+ if (!Module) {
+ assert(hasErrors && "emitting module but current module doesn't exist");
+ return;
+ }
+ }
+
// Emit the PCH file to the Buffer.
assert(SemaPtr && "No Sema?");
Buffer->Signature =
diff --git a/lib/Serialization/GlobalModuleIndex.cpp b/lib/Serialization/GlobalModuleIndex.cpp
index 581e894d9150..9f986d54a989 100644
--- a/lib/Serialization/GlobalModuleIndex.cpp
+++ b/lib/Serialization/GlobalModuleIndex.cpp
@@ -245,12 +245,8 @@ GlobalModuleIndex::readIndex(StringRef Path) {
return std::make_pair(nullptr, EC_NotFound);
std::unique_ptr<llvm::MemoryBuffer> Buffer = std::move(BufferOrErr.get());
- /// \brief The bitstream reader from which we'll read the AST file.
- llvm::BitstreamReader Reader((const unsigned char *)Buffer->getBufferStart(),
- (const unsigned char *)Buffer->getBufferEnd());
-
/// \brief The main bitstream cursor for the main block.
- llvm::BitstreamCursor Cursor(Reader);
+ llvm::BitstreamCursor Cursor(*Buffer);
// Sniff for the signature.
if (Cursor.Read(8) != 'B' ||
@@ -460,7 +456,7 @@ static void emitRecordID(unsigned ID, const char *Name,
void
GlobalModuleIndexBuilder::emitBlockInfoBlock(llvm::BitstreamWriter &Stream) {
SmallVector<uint64_t, 64> Record;
- Stream.EnterSubblock(llvm::bitc::BLOCKINFO_BLOCK_ID, 3);
+ Stream.EnterBlockInfoBlock();
#define BLOCK(X) emitBlockID(X ## _ID, #X, Stream, Record)
#define RECORD(X) emitRecordID(X, #X, Stream, Record)
@@ -504,9 +500,7 @@ bool GlobalModuleIndexBuilder::loadModuleFile(const FileEntry *File) {
}
// Initialize the input stream
- llvm::BitstreamReader InStreamFile;
- PCHContainerRdr.ExtractPCH((*Buffer)->getMemBufferRef(), InStreamFile);
- llvm::BitstreamCursor InStream(InStreamFile);
+ llvm::BitstreamCursor InStream(PCHContainerRdr.ExtractPCH(**Buffer));
// Sniff for the signature.
if (InStream.Read(8) != 'C' ||
diff --git a/lib/Serialization/Module.cpp b/lib/Serialization/Module.cpp
index ca0cb3c8ea17..72b08610bb4d 100644
--- a/lib/Serialization/Module.cpp
+++ b/lib/Serialization/Module.cpp
@@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/Module.h"
#include "ASTReaderInternals.h"
-#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
diff --git a/lib/Serialization/ModuleFileExtension.cpp b/lib/Serialization/ModuleFileExtension.cpp
index 81dcfd60ce8e..5bd0a1ce660b 100644
--- a/lib/Serialization/ModuleFileExtension.cpp
+++ b/lib/Serialization/ModuleFileExtension.cpp
@@ -8,7 +8,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Serialization/ModuleFileExtension.h"
#include "llvm/ADT/Hashing.h"
-#include "llvm/Support/raw_ostream.h"
using namespace clang;
ModuleFileExtension::~ModuleFileExtension() { }
diff --git a/lib/Serialization/ModuleManager.cpp b/lib/Serialization/ModuleManager.cpp
index 292f36dfeb2a..722b547e803e 100644
--- a/lib/Serialization/ModuleManager.cpp
+++ b/lib/Serialization/ModuleManager.cpp
@@ -11,14 +11,13 @@
// modules for the ASTReader.
//
//===----------------------------------------------------------------------===//
+#include "clang/Serialization/ModuleManager.h"
#include "clang/Frontend/PCHContainerOperations.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Serialization/GlobalModuleIndex.h"
-#include "clang/Serialization/ModuleManager.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
-#include "llvm/Support/raw_ostream.h"
#include <system_error>
#ifndef NDEBUG
@@ -67,7 +66,7 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// Look for the file entry. This only fails if the expected size or
// modification time differ.
const FileEntry *Entry;
- if (Type == MK_ExplicitModule) {
+ if (Type == MK_ExplicitModule || Type == MK_PrebuiltModule) {
// If we're not expecting to pull this file out of the module cache, it
// might have a different mtime due to being moved across filesystems in
// a distributed build. The size must still match, though. (As must the
@@ -85,37 +84,31 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
}
// Check whether we already loaded this module, before
- ModuleFile *&ModuleEntry = Modules[Entry];
+ ModuleFile *ModuleEntry = Modules[Entry];
bool NewModule = false;
if (!ModuleEntry) {
// Allocate a new module.
- ModuleFile *New = new ModuleFile(Type, Generation);
- New->Index = Chain.size();
- New->FileName = FileName.str();
- New->File = Entry;
- New->ImportLoc = ImportLoc;
- Chain.push_back(New);
- if (!New->isModule())
- PCHChain.push_back(New);
- if (!ImportedBy)
- Roots.push_back(New);
NewModule = true;
- ModuleEntry = New;
-
- New->InputFilesValidationTimestamp = 0;
- if (New->Kind == MK_ImplicitModule) {
- std::string TimestampFilename = New->getTimestampFilename();
+ ModuleEntry = new ModuleFile(Type, Generation);
+ ModuleEntry->Index = Chain.size();
+ ModuleEntry->FileName = FileName.str();
+ ModuleEntry->File = Entry;
+ ModuleEntry->ImportLoc = ImportLoc;
+ ModuleEntry->InputFilesValidationTimestamp = 0;
+
+ if (ModuleEntry->Kind == MK_ImplicitModule) {
+ std::string TimestampFilename = ModuleEntry->getTimestampFilename();
vfs::Status Status;
// A cached stat value would be fine as well.
if (!FileMgr.getNoncachedStatValue(TimestampFilename, Status))
- New->InputFilesValidationTimestamp =
- Status.getLastModificationTime().toEpochTime();
+ ModuleEntry->InputFilesValidationTimestamp =
+ llvm::sys::toTimeT(Status.getLastModificationTime());
}
// Load the contents of the module
if (std::unique_ptr<llvm::MemoryBuffer> Buffer = lookupBuffer(FileName)) {
// The buffer was already provided for us.
- New->Buffer = std::move(Buffer);
+ ModuleEntry->Buffer = std::move(Buffer);
} else {
// Open the AST file.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buf(
@@ -127,52 +120,40 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
// ModuleManager it must be the same underlying file.
// FIXME: Because FileManager::getFile() doesn't guarantee that it will
// give us an open file, this may not be 100% reliable.
- Buf = FileMgr.getBufferForFile(New->File,
+ Buf = FileMgr.getBufferForFile(ModuleEntry->File,
/*IsVolatile=*/false,
/*ShouldClose=*/false);
}
if (!Buf) {
ErrorStr = Buf.getError().message();
+ delete ModuleEntry;
return Missing;
}
- New->Buffer = std::move(*Buf);
+ ModuleEntry->Buffer = std::move(*Buf);
}
// Initialize the stream.
- PCHContainerRdr.ExtractPCH(New->Buffer->getMemBufferRef(), New->StreamFile);
+ ModuleEntry->Data = PCHContainerRdr.ExtractPCH(*ModuleEntry->Buffer);
}
if (ExpectedSignature) {
- if (NewModule)
- ModuleEntry->Signature = ReadSignature(ModuleEntry->StreamFile);
- else
- assert(ModuleEntry->Signature == ReadSignature(ModuleEntry->StreamFile));
+ // If we've not read the control block yet, read the signature eagerly now
+ // so that we can check it.
+ if (!ModuleEntry->Signature)
+ ModuleEntry->Signature = ReadSignature(ModuleEntry->Data);
if (ModuleEntry->Signature != ExpectedSignature) {
ErrorStr = ModuleEntry->Signature ? "signature mismatch"
: "could not read module signature";
- if (NewModule) {
- // Remove the module file immediately, since removeModules might try to
- // invalidate the file cache for Entry, and that is not safe if this
- // module is *itself* up to date, but has an out-of-date importer.
- Modules.erase(Entry);
- assert(Chain.back() == ModuleEntry);
- Chain.pop_back();
- if (!ModuleEntry->isModule())
- PCHChain.pop_back();
- if (Roots.back() == ModuleEntry)
- Roots.pop_back();
- else
- assert(ImportedBy);
+ if (NewModule)
delete ModuleEntry;
- }
return OutOfDate;
}
}
-
+
if (ImportedBy) {
ModuleEntry->ImportedBy.insert(ImportedBy);
ImportedBy->Imports.insert(ModuleEntry);
@@ -184,7 +165,20 @@ ModuleManager::addModule(StringRef FileName, ModuleKind Type,
}
Module = ModuleEntry;
- return NewModule? NewlyLoaded : AlreadyLoaded;
+
+ if (!NewModule)
+ return AlreadyLoaded;
+
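+  // Registration in Modules/Chain/PCHChain/Roots is deferred until the file
+  // has been validated, so a failed load above can simply delete ModuleEntry
+  // without having to unwind those lists.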
+ assert(!Modules[Entry] && "module loaded twice");
+ Modules[Entry] = ModuleEntry;
+
+ Chain.push_back(ModuleEntry);
+ if (!ModuleEntry->isModule())
+ PCHChain.push_back(ModuleEntry);
+ if (!ImportedBy)
+ Roots.push_back(ModuleEntry);
+
+ return NewlyLoaded;
}
void ModuleManager::removeModules(
@@ -413,13 +407,16 @@ bool ModuleManager::lookupModuleFile(StringRef FileName,
off_t ExpectedSize,
time_t ExpectedModTime,
const FileEntry *&File) {
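+  // "-" means the AST file is read from stdin; there is no on-disk file to
+  // stat or size-check.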
+ if (FileName == "-") {
+ File = nullptr;
+ return false;
+ }
+
// Open the file immediately to ensure there is no race between stat'ing and
// opening the file.
File = FileMgr.getFile(FileName, /*openFile=*/true, /*cacheFailure=*/false);
-
- if (!File && FileName != "-") {
+ if (!File)
return false;
- }
if ((ExpectedSize && ExpectedSize != File->getSize()) ||
(ExpectedModTime && ExpectedModTime != File->getModificationTime()))
@@ -434,15 +431,15 @@ bool ModuleManager::lookupModuleFile(StringRef FileName,
namespace llvm {
template<>
struct GraphTraits<ModuleManager> {
- typedef ModuleFile NodeType;
+ typedef ModuleFile *NodeRef;
typedef llvm::SetVector<ModuleFile *>::const_iterator ChildIteratorType;
typedef ModuleManager::ModuleConstIterator nodes_iterator;
-
- static ChildIteratorType child_begin(NodeType *Node) {
+
+ static ChildIteratorType child_begin(NodeRef Node) {
return Node->Imports.begin();
}
- static ChildIteratorType child_end(NodeType *Node) {
+ static ChildIteratorType child_end(NodeRef Node) {
return Node->Imports.end();
}
diff --git a/lib/Serialization/MultiOnDiskHashTable.h b/lib/Serialization/MultiOnDiskHashTable.h
index 04dea831695c..fdbbb602b537 100644
--- a/lib/Serialization/MultiOnDiskHashTable.h
+++ b/lib/Serialization/MultiOnDiskHashTable.h
@@ -18,7 +18,11 @@
#ifndef LLVM_CLANG_LIB_SERIALIZATION_MULTIONDISKHASHTABLE_H
#define LLVM_CLANG_LIB_SERIALIZATION_MULTIONDISKHASHTABLE_H
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/OnDiskHashTable.h"
diff --git a/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
new file mode 100644
index 000000000000..e6592a285e47
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp
@@ -0,0 +1,68 @@
+//===- AnalysisOrderChecker - Print callbacks called ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker prints callbacks that are called during analysis.
+// This is used to check that callbacks fire in the expected order and are
+// neither duplicated nor dropped.
+// Feel free to extend this checker with any callback you need to check.
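+//
+// Callbacks are printed only when enabled via analyzer-config, e.g. (a
+// sketch; the exact checker name depends on how it is registered):
+//   -analyzer-checker=debug.AnalysisOrder
+//   -analyzer-config debug.AnalysisOrder:PreStmtCastExpr=true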
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class AnalysisOrderChecker : public Checker< check::PreStmt<CastExpr>,
+ check::PostStmt<CastExpr>,
+ check::PreStmt<ArraySubscriptExpr>,
+ check::PostStmt<ArraySubscriptExpr>> {
+ bool isCallbackEnabled(CheckerContext &C, StringRef CallbackName) const {
+ AnalyzerOptions &Opts = C.getAnalysisManager().getAnalyzerOptions();
+ return Opts.getBooleanOption("*", false, this) ||
+ Opts.getBooleanOption(CallbackName, false, this);
+ }
+
+public:
+ void checkPreStmt(const CastExpr *CE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtCastExpr"))
+ llvm::errs() << "PreStmt<CastExpr> (Kind : " << CE->getCastKindName()
+ << ")\n";
+ }
+
+ void checkPostStmt(const CastExpr *CE, CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtCastExpr"))
+ llvm::errs() << "PostStmt<CastExpr> (Kind : " << CE->getCastKindName()
+ << ")\n";
+ }
+
+  void checkPreStmt(const ArraySubscriptExpr *SubExpr,
+                    CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PreStmtArraySubscriptExpr"))
+ llvm::errs() << "PreStmt<ArraySubscriptExpr>\n";
+ }
+
+  void checkPostStmt(const ArraySubscriptExpr *SubExpr,
+                     CheckerContext &C) const {
+ if (isCallbackEnabled(C, "PostStmtArraySubscriptExpr"))
+ llvm::errs() << "PostStmt<ArraySubscriptExpr>\n";
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerAnalysisOrderChecker(CheckerManager &mgr) {
+ mgr.registerChecker<AnalysisOrderChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 13f0f655b89c..848c2662019a 100644
--- a/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -17,6 +17,7 @@
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
@@ -67,18 +68,47 @@ public:
static SVal computeExtentBegin(SValBuilder &svalBuilder,
const MemRegion *region) {
- while (true)
- switch (region->getKind()) {
+ const MemSpaceRegion *SR = region->getMemorySpace();
+ if (SR->getKind() == MemRegion::UnknownSpaceRegionKind)
+ return UnknownVal();
+ else
+ return svalBuilder.makeZeroArrayIndex();
+}
+
+// TODO: once the constraint manager is smart enough to handle non-simplified
+// symbolic expressions, remove this function. Note that this cannot be used
+// in the constraint manager as-is, since it does not handle overflows. It is
+// safe to assume, however, that memory offsets will not overflow.
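+// As a sketch of the rewriting (hypothetical values, not from this patch):
+// for a symbolic offset of the form (s * 4) + 8 and a concrete extent of 24,
+// the pair is rewritten to (s * 4, 16) by the BO_Add case and then to (s, 4)
+// by the BO_Mul case, leaving a comparison the constraint manager can handle
+// directly.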
+static std::pair<NonLoc, nonloc::ConcreteInt>
+getSimplifiedOffsets(NonLoc offset, nonloc::ConcreteInt extent,
+ SValBuilder &svalBuilder) {
+ Optional<nonloc::SymbolVal> SymVal = offset.getAs<nonloc::SymbolVal>();
+ if (SymVal && SymVal->isExpression()) {
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SymVal->getSymbol())) {
+ llvm::APSInt constant =
+ APSIntType(extent.getValue()).convert(SIE->getRHS());
+ switch (SIE->getOpcode()) {
+ case BO_Mul:
+        // The constant should never be 0 here, since it is the result of
+        // scaling based on the size of a type, which is never 0.
+ if ((extent.getValue() % constant) != 0)
+ return std::pair<NonLoc, nonloc::ConcreteInt>(offset, extent);
+ else
+ return getSimplifiedOffsets(
+ nonloc::SymbolVal(SIE->getLHS()),
+ svalBuilder.makeIntVal(extent.getValue() / constant),
+ svalBuilder);
+ case BO_Add:
+ return getSimplifiedOffsets(
+ nonloc::SymbolVal(SIE->getLHS()),
+ svalBuilder.makeIntVal(extent.getValue() - constant), svalBuilder);
default:
- return svalBuilder.makeZeroArrayIndex();
- case MemRegion::SymbolicRegionKind:
- // FIXME: improve this later by tracking symbolic lower bounds
- // for symbolic regions.
- return UnknownVal();
- case MemRegion::ElementRegionKind:
- region = cast<SubRegion>(region)->getSuperRegion();
- continue;
+ break;
+ }
}
+ }
+
+ return std::pair<NonLoc, nonloc::ConcreteInt>(offset, extent);
}
void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
@@ -104,6 +134,8 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
if (!rawOffset.getRegion())
return;
+ NonLoc rawOffsetVal = rawOffset.getByteOffset();
+
// CHECK LOWER BOUND: Is byteOffset < extent begin?
// If so, we are doing a load/store
// before the first valid offset in the memory region.
@@ -111,9 +143,17 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
if (Optional<NonLoc> NV = extentBegin.getAs<NonLoc>()) {
- SVal lowerBound =
- svalBuilder.evalBinOpNN(state, BO_LT, rawOffset.getByteOffset(), *NV,
- svalBuilder.getConditionType());
+ if (NV->getAs<nonloc::ConcreteInt>()) {
+ std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
+ getSimplifiedOffsets(rawOffset.getByteOffset(),
+ NV->castAs<nonloc::ConcreteInt>(),
+ svalBuilder);
+ rawOffsetVal = simplifiedOffsets.first;
+ *NV = simplifiedOffsets.second;
+ }
+
+ SVal lowerBound = svalBuilder.evalBinOpNN(state, BO_LT, rawOffsetVal, *NV,
+ svalBuilder.getConditionType());
Optional<NonLoc> lowerBoundToCheck = lowerBound.getAs<NonLoc>();
if (!lowerBoundToCheck)
@@ -142,10 +182,18 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
if (!extentVal.getAs<NonLoc>())
break;
- SVal upperbound
- = svalBuilder.evalBinOpNN(state, BO_GE, rawOffset.getByteOffset(),
- extentVal.castAs<NonLoc>(),
- svalBuilder.getConditionType());
+ if (extentVal.getAs<nonloc::ConcreteInt>()) {
+ std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
+ getSimplifiedOffsets(rawOffset.getByteOffset(),
+ extentVal.castAs<nonloc::ConcreteInt>(),
+ svalBuilder);
+ rawOffsetVal = simplifiedOffsets.first;
+ extentVal = simplifiedOffsets.second;
+ }
+
+ SVal upperbound = svalBuilder.evalBinOpNN(state, BO_GE, rawOffsetVal,
+ extentVal.castAs<NonLoc>(),
+ svalBuilder.getConditionType());
Optional<NonLoc> upperboundToCheck = upperbound.getAs<NonLoc>();
if (!upperboundToCheck)
@@ -157,13 +205,13 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// If we are under constrained and the index variables are tainted, report.
if (state_exceedsUpperBound && state_withinUpperBound) {
- if (state->isTainted(rawOffset.getByteOffset()))
+ if (state->isTainted(rawOffset.getByteOffset())) {
reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted);
return;
- }
-
- // If we are constrained enough to definitely exceed the upper bound, report.
- if (state_exceedsUpperBound) {
+ }
+ } else if (state_exceedsUpperBound) {
+ // If we are constrained enough to definitely exceed the upper bound,
+ // report.
assert(!state_withinUpperBound);
reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
return;
diff --git a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 6239c5507a4b..1ea85d60c9e9 100644
--- a/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -336,15 +336,15 @@ void NilArgChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
}
//===----------------------------------------------------------------------===//
-// Error reporting.
+// Checking for mismatched types passed to CFNumberCreate/CFNumberGetValue.
//===----------------------------------------------------------------------===//
namespace {
-class CFNumberCreateChecker : public Checker< check::PreStmt<CallExpr> > {
+class CFNumberChecker : public Checker< check::PreStmt<CallExpr> > {
mutable std::unique_ptr<APIMisuse> BT;
- mutable IdentifierInfo* II;
+ mutable IdentifierInfo *ICreate, *IGetValue;
public:
- CFNumberCreateChecker() : II(nullptr) {}
+ CFNumberChecker() : ICreate(nullptr), IGetValue(nullptr) {}
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -425,7 +425,7 @@ static const char* GetCFNumberTypeStr(uint64_t i) {
}
#endif
-void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
+void CFNumberChecker::checkPreStmt(const CallExpr *CE,
CheckerContext &C) const {
ProgramStateRef state = C.getState();
const FunctionDecl *FD = C.getCalleeDecl(CE);
@@ -433,10 +433,12 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
return;
ASTContext &Ctx = C.getASTContext();
- if (!II)
- II = &Ctx.Idents.get("CFNumberCreate");
-
- if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
+ if (!ICreate) {
+ ICreate = &Ctx.Idents.get("CFNumberCreate");
+ IGetValue = &Ctx.Idents.get("CFNumberGetValue");
+ }
+ if (!(FD->getIdentifier() == ICreate || FD->getIdentifier() == IGetValue) ||
+ CE->getNumArgs() != 3)
return;
// Get the value of the "theType" argument.
@@ -450,13 +452,13 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
return;
uint64_t NumberKind = V->getValue().getLimitedValue();
- Optional<uint64_t> OptTargetSize = GetCFNumberSize(Ctx, NumberKind);
+ Optional<uint64_t> OptCFNumberSize = GetCFNumberSize(Ctx, NumberKind);
// FIXME: In some cases we can emit an error.
- if (!OptTargetSize)
+ if (!OptCFNumberSize)
return;
- uint64_t TargetSize = *OptTargetSize;
+ uint64_t CFNumberSize = *OptCFNumberSize;
// Look at the value of the integer being passed by reference. Essentially
// we want to catch cases where the value passed in is not equal to the
@@ -481,39 +483,44 @@ void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
if (!T->isIntegralOrEnumerationType())
return;
- uint64_t SourceSize = Ctx.getTypeSize(T);
+ uint64_t PrimitiveTypeSize = Ctx.getTypeSize(T);
- // CHECK: is SourceSize == TargetSize
- if (SourceSize == TargetSize)
+ if (PrimitiveTypeSize == CFNumberSize)
return;
- // Generate an error. Only generate a sink error node
- // if 'SourceSize < TargetSize'; otherwise generate a non-fatal error node.
- //
// FIXME: We can actually create an abstract "CFNumber" object that has
// the bits initialized to the provided values.
- //
- ExplodedNode *N = SourceSize < TargetSize ? C.generateErrorNode()
- : C.generateNonFatalErrorNode();
+ ExplodedNode *N = C.generateNonFatalErrorNode();
if (N) {
SmallString<128> sbuf;
llvm::raw_svector_ostream os(sbuf);
+ bool isCreate = (FD->getIdentifier() == ICreate);
+
+ if (isCreate) {
+ os << (PrimitiveTypeSize == 8 ? "An " : "A ")
+ << PrimitiveTypeSize << "-bit integer is used to initialize a "
+ << "CFNumber object that represents "
+ << (CFNumberSize == 8 ? "an " : "a ")
+ << CFNumberSize << "-bit integer; ";
+ } else {
+ os << "A CFNumber object that represents "
+ << (CFNumberSize == 8 ? "an " : "a ")
+ << CFNumberSize << "-bit integer is used to initialize "
+ << (PrimitiveTypeSize == 8 ? "an " : "a ")
+ << PrimitiveTypeSize << "-bit integer; ";
+ }
- os << (SourceSize == 8 ? "An " : "A ")
- << SourceSize << " bit integer is used to initialize a CFNumber "
- "object that represents "
- << (TargetSize == 8 ? "an " : "a ")
- << TargetSize << " bit integer. ";
-
- if (SourceSize < TargetSize)
- os << (TargetSize - SourceSize)
- << " bits of the CFNumber value will be garbage." ;
+ if (PrimitiveTypeSize < CFNumberSize)
+ os << (CFNumberSize - PrimitiveTypeSize)
+ << " bits of the CFNumber value will "
+ << (isCreate ? "be garbage." : "overwrite adjacent storage.");
else
- os << (SourceSize - TargetSize)
- << " bits of the input integer will be lost.";
+ os << (PrimitiveTypeSize - CFNumberSize)
+ << " bits of the integer value will be "
+ << (isCreate ? "lost." : "garbage.");
if (!BT)
- BT.reset(new APIMisuse(this, "Bad use of CFNumberCreate"));
+ BT.reset(new APIMisuse(this, "Bad use of CFNumber APIs"));
auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
report->addRange(CE->getArg(2)->getSourceRange());
@@ -1272,8 +1279,8 @@ void ento::registerNilArgChecker(CheckerManager &mgr) {
mgr.registerChecker<NilArgChecker>();
}
-void ento::registerCFNumberCreateChecker(CheckerManager &mgr) {
- mgr.registerChecker<CFNumberCreateChecker>();
+void ento::registerCFNumberChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CFNumberChecker>();
}
void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
diff --git a/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp b/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
new file mode 100644
index 000000000000..082a4873217b
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp
@@ -0,0 +1,109 @@
+//===-- BlockInCriticalSectionChecker.cpp -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a checker for blocking calls inside critical sections. This checker
+// finds calls to blocking functions (for example: sleep, getc, fgets, read,
+// recv etc.) inside a critical section. When sleep(x) is called while a mutex
+// is held, other threads cannot lock the same mutex. This might take some
+// time, leading to bad performance or even deadlock.
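+//
+// A minimal sketch of the pattern this checker flags (hypothetical user code;
+// the checker matches the called functions purely by name):
+//
+//   lock(&m);    // enter the critical section
+//   sleep(10);   // blocking call while the lock is held -- warning
+//   unlock(&m);  // leave the critical section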
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class BlockInCriticalSectionChecker : public Checker<check::PostCall,
+ check::PreCall> {
+
+ CallDescription LockFn, UnlockFn, SleepFn, GetcFn, FgetsFn, ReadFn, RecvFn;
+
+ std::unique_ptr<BugType> BlockInCritSectionBugType;
+
+ void reportBlockInCritSection(SymbolRef BlockDescSym,
+ const CallEvent &call,
+ CheckerContext &C) const;
+
+public:
+ BlockInCriticalSectionChecker();
+
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+
+ /// Handle lock and unlock events, and report calls to blocking functions
+ /// (sleep, getc, fgets, read, recv) made while at least one mutex is held.
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+};
+
+} // end anonymous namespace
+
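+// MutexCounter tracks how many locks are currently held along the analyzed
+// path; a nonzero count means execution is inside a critical section.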
+REGISTER_TRAIT_WITH_PROGRAMSTATE(MutexCounter, unsigned)
+
+BlockInCriticalSectionChecker::BlockInCriticalSectionChecker()
+ : LockFn("lock"), UnlockFn("unlock"), SleepFn("sleep"), GetcFn("getc"),
+ FgetsFn("fgets"), ReadFn("read"), RecvFn("recv") {
+ // Initialize the bug type.
+ BlockInCritSectionBugType.reset(
+ new BugType(this, "Call to blocking function in critical section",
+ "Blocking Error"));
+}
+
+void BlockInCriticalSectionChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+}
+
+void BlockInCriticalSectionChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!Call.isCalled(LockFn)
+ && !Call.isCalled(SleepFn)
+ && !Call.isCalled(GetcFn)
+ && !Call.isCalled(FgetsFn)
+ && !Call.isCalled(ReadFn)
+ && !Call.isCalled(RecvFn)
+ && !Call.isCalled(UnlockFn))
+ return;
+
+ ProgramStateRef State = C.getState();
+ unsigned mutexCount = State->get<MutexCounter>();
+ if (Call.isCalled(UnlockFn) && mutexCount > 0) {
+ State = State->set<MutexCounter>(--mutexCount);
+ C.addTransition(State);
+ } else if (Call.isCalled(LockFn)) {
+ State = State->set<MutexCounter>(++mutexCount);
+ C.addTransition(State);
+ } else if (mutexCount > 0) {
+ SymbolRef BlockDesc = Call.getReturnValue().getAsSymbol();
+ reportBlockInCritSection(BlockDesc, Call, C);
+ }
+}
+
+void BlockInCriticalSectionChecker::reportBlockInCritSection(
+ SymbolRef BlockDescSym, const CallEvent &Call, CheckerContext &C) const {
+ ExplodedNode *ErrNode = C.generateNonFatalErrorNode();
+ if (!ErrNode)
+ return;
+
+ auto R = llvm::make_unique<BugReport>(*BlockInCritSectionBugType,
+ "A blocking function %s is called inside a critical section.", ErrNode);
+ R->addRange(Call.getSourceRange());
+ R->markInteresting(BlockDescSym);
+ C.emitReport(std::move(R));
+}
+
+void ento::registerBlockInCriticalSectionChecker(CheckerManager &mgr) {
+ mgr.registerChecker<BlockInCriticalSectionChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index dab2f61229a0..8c2aef21b3ca 100644
--- a/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -55,6 +55,7 @@ bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
return true;
}
+ case Builtin::BI__builtin_alloca_with_align:
case Builtin::BI__builtin_alloca: {
// FIXME: Refactor into StoreManager itself?
MemRegionManager& RM = C.getStoreManager().getRegionManager();
diff --git a/lib/StaticAnalyzer/Checkers/CMakeLists.txt b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
index 62ccc3cb4970..41415f0376c0 100644
--- a/lib/StaticAnalyzer/Checkers/CMakeLists.txt
+++ b/lib/StaticAnalyzer/Checkers/CMakeLists.txt
@@ -4,10 +4,12 @@ set(LLVM_LINK_COMPONENTS
add_clang_library(clangStaticAnalyzerCheckers
AllocationDiagnostics.cpp
+ AnalysisOrderChecker.cpp
AnalyzerStatsChecker.cpp
ArrayBoundChecker.cpp
ArrayBoundCheckerV2.cpp
BasicObjCFoundationChecks.cpp
+ BlockInCriticalSectionChecker.cpp
BoolAssignmentChecker.cpp
BuiltinFunctionChecker.cpp
CStringChecker.cpp
@@ -22,6 +24,9 @@ add_clang_library(clangStaticAnalyzerCheckers
CheckerDocumentation.cpp
ChrootChecker.cpp
ClangCheckers.cpp
+ CloneChecker.cpp
+ ConversionChecker.cpp
+ CXXSelfAssignmentChecker.cpp
DeadStoresChecker.cpp
DebugCheckers.cpp
DereferenceChecker.cpp
@@ -32,6 +37,7 @@ add_clang_library(clangStaticAnalyzerCheckers
ExprInspectionChecker.cpp
FixedAddressChecker.cpp
GenericTaintChecker.cpp
+ GTestChecker.cpp
IdenticalExprChecker.cpp
IvarInvalidationChecker.cpp
LLVMConventionsChecker.cpp
@@ -49,10 +55,12 @@ add_clang_library(clangStaticAnalyzerCheckers
NoReturnFunctionChecker.cpp
NonNullParamChecker.cpp
NullabilityChecker.cpp
+ NumberObjectConversionChecker.cpp
ObjCAtSyncChecker.cpp
ObjCContainersASTChecker.cpp
ObjCContainersChecker.cpp
ObjCMissingSuperCallChecker.cpp
+ ObjCPropertyChecker.cpp
ObjCSelfInitChecker.cpp
ObjCSuperDeallocChecker.cpp
ObjCUnusedIVarsChecker.cpp
@@ -65,6 +73,7 @@ add_clang_library(clangStaticAnalyzerCheckers
ReturnUndefChecker.cpp
SimpleStreamChecker.cpp
StackAddrEscapeChecker.cpp
+ StdLibraryFunctionsChecker.cpp
StreamChecker.cpp
TaintTesterChecker.cpp
TestAfterDivZeroChecker.cpp
@@ -78,6 +87,7 @@ add_clang_library(clangStaticAnalyzerCheckers
UnreachableCodeChecker.cpp
VforkChecker.cpp
VLASizeChecker.cpp
+ ValistChecker.cpp
VirtualCallChecker.cpp
DEPENDS
@@ -85,6 +95,7 @@ add_clang_library(clangStaticAnalyzerCheckers
LINK_LIBS
clangAST
+ clangASTMatchers
clangAnalysis
clangBasic
clangLex
diff --git a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index e9512977fa6d..238032c895f6 100644
--- a/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -22,7 +22,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -63,7 +62,6 @@ public:
void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
- bool wantsRegionChangeUpdate(ProgramStateRef state) const;
ProgramStateRef
checkRegionChanges(ProgramStateRef state,
@@ -686,6 +684,7 @@ SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
QualType sizeTy = svalBuilder.getContext().getSizeType();
SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(),
MR, Ex, sizeTy,
+ C.getLocationContext(),
C.blockCount());
if (!hypothetical) {
@@ -2112,11 +2111,6 @@ void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
C.addTransition(state);
}
-bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
- CStringLengthTy Entries = state->get<CStringLength>();
- return !Entries.isEmpty();
-}
-
ProgramStateRef
CStringChecker::checkRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *,
diff --git a/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
new file mode 100644
index 000000000000..7631322d255b
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp
@@ -0,0 +1,62 @@
+//=== CXXSelfAssignmentChecker.cpp -----------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CXXSelfAssignmentChecker, which tests all user-defined
+// copy and move assignment operators for the case of self-assignment, i.e.
+// where the parameter refers to the same location that the 'this' pointer
+// points to. The checker itself performs no checks of its own; instead it
+// causes the analyzer to check every copy and move assignment operator twice:
+// once for when 'this' aliases with the parameter and once for when it may not.
+// It is the task of the other enabled checkers to find the bugs in these two
+// different cases.
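+//
+// For example (hypothetical user code), both aliasing assumptions are
+// analyzed for:
+//
+//   MyClass &MyClass::operator=(const MyClass &Other) {
+//     delete Data;
+//     Data = new int(*Other.Data); // use-after-free when this == &Other
+//     return *this;
+//   }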
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class CXXSelfAssignmentChecker : public Checker<check::BeginFunction> {
+public:
+ CXXSelfAssignmentChecker();
+ void checkBeginFunction(CheckerContext &C) const;
+};
+}
+
+CXXSelfAssignmentChecker::CXXSelfAssignmentChecker() {}
+
+void CXXSelfAssignmentChecker::checkBeginFunction(CheckerContext &C) const {
+ if (!C.inTopFrame())
+ return;
+ const auto *LCtx = C.getLocationContext();
+ const auto *MD = dyn_cast<CXXMethodDecl>(LCtx->getDecl());
+ if (!MD)
+ return;
+ if (!MD->isCopyAssignmentOperator() && !MD->isMoveAssignmentOperator())
+ return;
+ auto &State = C.getState();
+ auto &SVB = C.getSValBuilder();
+ auto ThisVal =
+ State->getSVal(SVB.getCXXThis(MD, LCtx->getCurrentStackFrame()));
+ auto Param = SVB.makeLoc(State->getRegion(MD->getParamDecl(0), LCtx));
+ auto ParamVal = State->getSVal(Param);
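+  // Fork the analysis: in one state the parameter is bound to the value of
+  // 'this' (the self-assignment case), in the other it keeps its own value.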
+ ProgramStateRef SelfAssignState = State->bindLoc(Param, ThisVal);
+ C.addTransition(SelfAssignState);
+ ProgramStateRef NonSelfAssignState = State->bindLoc(Param, ParamVal);
+ C.addTransition(NonSelfAssignState);
+}
+
+void ento::registerCXXSelfAssignmentChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<CXXSelfAssignmentChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 5126716fcded..f474857a1bf4 100644
--- a/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -356,7 +356,6 @@ void CallAndMessageChecker::checkPreStmt(const CXXDeleteExpr *DE,
}
}
-
void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -389,11 +388,10 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
}
const Decl *D = Call.getDecl();
- const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
- if (FD) {
- // If we have a declaration, we can make sure we pass enough parameters to
- // the function.
- unsigned Params = FD->getNumParams();
+ if (D && (isa<FunctionDecl>(D) || isa<BlockDecl>(D))) {
+ // If we have a function or block declaration, we can make sure we pass
+ // enough parameters.
+ unsigned Params = Call.parameters().size();
if (Call.getNumArgs() < Params) {
ExplodedNode *N = C.generateErrorNode();
if (!N)
@@ -403,8 +401,14 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
SmallString<512> Str;
llvm::raw_svector_ostream os(Str);
- os << "Function taking " << Params << " argument"
- << (Params == 1 ? "" : "s") << " is called with less ("
+ if (isa<FunctionDecl>(D)) {
+ os << "Function ";
+ } else {
+ assert(isa<BlockDecl>(D));
+ os << "Block ";
+ }
+ os << "taking " << Params << " argument"
+ << (Params == 1 ? "" : "s") << " is called with fewer ("
<< Call.getNumArgs() << ")";
C.emitReport(
@@ -425,6 +429,7 @@ void CallAndMessageChecker::checkPreCall(const CallEvent &Call,
else
BT = &BT_call_arg;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
const ParmVarDecl *ParamDecl = nullptr;
if(FD && i < FD->getNumParams())
diff --git a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index 2337400750c7..3e178152d925 100644
--- a/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -140,5 +140,10 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
}
void ento::registerCastSizeChecker(CheckerManager &mgr) {
- mgr.registerChecker<CastSizeChecker>();
+ // PR31226: C++ is more complicated than what this checker currently supports.
+ // There are derived-to-base casts, there are different rules for 0-size
+ // structures, no flexible arrays, etc.
+ // FIXME: Disabled on C++ for now.
+ if (!mgr.getLangOpts().CPlusPlus)
+ mgr.registerChecker<CastSizeChecker>();
}
diff --git a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
index fa7841356efb..16a475ae9dd2 100644
--- a/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -1,4 +1,4 @@
-//=== CastToStructChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
+//=== CastToStructChecker.cpp ----------------------------------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,12 +8,13 @@
//===----------------------------------------------------------------------===//
//
// This file defines CastToStructChecker, a builtin checker that checks for
-// cast from non-struct pointer to struct pointer.
+// casts from non-struct pointers to struct pointers and widening struct casts.
// This check corresponds to CWE-588.
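+//
+// A sketch of code the new widening-cast warning is meant to catch
+// (hypothetical example):
+//
+//   struct Small { int a; };
+//   struct Big   { int a; int b; };
+//   struct Small S;
+//   struct Big *P = (struct Big *)&S; // accessing P->b overruns 'S'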
//
//===----------------------------------------------------------------------===//
#include "ClangSACheckers.h"
+#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -23,18 +24,22 @@ using namespace clang;
using namespace ento;
namespace {
-class CastToStructChecker : public Checker< check::PreStmt<CastExpr> > {
- mutable std::unique_ptr<BuiltinBug> BT;
+class CastToStructVisitor : public RecursiveASTVisitor<CastToStructVisitor> {
+ BugReporter &BR;
+ const CheckerBase *Checker;
+ AnalysisDeclContext *AC;
public:
- void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+ explicit CastToStructVisitor(BugReporter &B, const CheckerBase *Checker,
+ AnalysisDeclContext *A)
+ : BR(B), Checker(Checker), AC(A) {}
+ bool VisitCastExpr(const CastExpr *CE);
};
}
-void CastToStructChecker::checkPreStmt(const CastExpr *CE,
- CheckerContext &C) const {
+bool CastToStructVisitor::VisitCastExpr(const CastExpr *CE) {
const Expr *E = CE->getSubExpr();
- ASTContext &Ctx = C.getASTContext();
+ ASTContext &Ctx = AC->getASTContext();
QualType OrigTy = Ctx.getCanonicalType(E->getType());
QualType ToTy = Ctx.getCanonicalType(CE->getType());
@@ -42,34 +47,72 @@ void CastToStructChecker::checkPreStmt(const CastExpr *CE,
const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
if (!ToPTy || !OrigPTy)
- return;
+ return true;
QualType OrigPointeeTy = OrigPTy->getPointeeType();
QualType ToPointeeTy = ToPTy->getPointeeType();
if (!ToPointeeTy->isStructureOrClassType())
- return;
+ return true;
// We allow cast from void*.
if (OrigPointeeTy->isVoidType())
- return;
+ return true;
// Now the cast-to-type is struct pointer, the original type is not void*.
if (!OrigPointeeTy->isRecordType()) {
- if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- if (!BT)
- BT.reset(
- new BuiltinBug(this, "Cast from non-struct type to struct type",
- "Casting a non-structure type to a structure type "
- "and accessing a field can lead to memory access "
- "errors or data corruption."));
- auto R = llvm::make_unique<BugReport>(*BT, BT->getDescription(), N);
- R->addRange(CE->getSourceRange());
- C.emitReport(std::move(R));
- }
+ SourceRange Sr[1] = {CE->getSourceRange()};
+ PathDiagnosticLocation Loc(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(
+ AC->getDecl(), Checker, "Cast from non-struct type to struct type",
+ categories::LogicError, "Casting a non-structure type to a structure "
+ "type and accessing a field can lead to memory "
+ "access errors or data corruption.",
+ Loc, Sr);
+ } else {
+ // Don't warn when size of data is unknown.
+ const auto *U = dyn_cast<UnaryOperator>(E);
+ if (!U || U->getOpcode() != UO_AddrOf)
+ return true;
+
+ // Don't warn for references
+ const ValueDecl *VD = nullptr;
+ if (const auto *SE = dyn_cast<DeclRefExpr>(U->getSubExpr()))
+ VD = dyn_cast<ValueDecl>(SE->getDecl());
+ else if (const auto *SE = dyn_cast<MemberExpr>(U->getSubExpr()))
+ VD = SE->getMemberDecl();
+ if (!VD || VD->getType()->isReferenceType())
+ return true;
+
+ // Warn when there is a widening cast.
+ unsigned ToWidth = Ctx.getTypeInfo(ToPointeeTy).Width;
+ unsigned OrigWidth = Ctx.getTypeInfo(OrigPointeeTy).Width;
+ if (ToWidth <= OrigWidth)
+ return true;
+
+ PathDiagnosticLocation Loc(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), Checker, "Widening cast to struct type",
+ categories::LogicError,
+ "Casting data to a larger structure type and accessing "
+ "a field can lead to memory access errors or data "
+ "corruption.",
+ Loc, CE->getSourceRange());
}
+
+ return true;
}
+namespace {
+class CastToStructChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ CastToStructVisitor Visitor(BR, this, Mgr.getAnalysisDeclContext(D));
+ Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+} // end anonymous namespace
+
void ento::registerCastToStructChecker(CheckerManager &mgr) {
mgr.registerChecker<CastToStructChecker>();
}
diff --git a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 9e863e79e41f..2818c9d9fd4a 100644
--- a/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -34,6 +34,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
@@ -173,6 +174,7 @@ private:
bool classHasSeparateTeardown(const ObjCInterfaceDecl *ID) const;
bool isReleasedByCIFilterDealloc(const ObjCPropertyImplDecl *PropImpl) const;
+ bool isNibLoadedIvarWithoutRetain(const ObjCPropertyImplDecl *PropImpl) const;
};
} // End anonymous namespace.
@@ -525,7 +527,7 @@ void ObjCDeallocChecker::diagnoseMissingReleases(CheckerContext &C) const {
if (SelfRegion != IvarRegion->getSuperRegion())
continue;
- const ObjCIvarDecl *IvarDecl = IvarRegion->getDecl();
+ const ObjCIvarDecl *IvarDecl = IvarRegion->getDecl();
// Prevent an inlined call to -dealloc in a super class from warning
// about the values the subclass's -dealloc should release.
if (IvarDecl->getContainingInterface() !=
@@ -903,6 +905,9 @@ ReleaseRequirement ObjCDeallocChecker::getDeallocReleaseRequirement(
if (isReleasedByCIFilterDealloc(PropImpl))
return ReleaseRequirement::MustNotReleaseDirectly;
+ if (isNibLoadedIvarWithoutRetain(PropImpl))
+ return ReleaseRequirement::Unknown;
+
return ReleaseRequirement::MustRelease;
case ObjCPropertyDecl::Weak:
@@ -1059,6 +1064,32 @@ bool ObjCDeallocChecker::isReleasedByCIFilterDealloc(
return false;
}
+/// Returns whether the ivar backing the property is an IBOutlet that
+/// has its value set by nib loading code without retaining the value.
+///
+/// On macOS, if there is no setter, the nib-loading code sets the ivar
+/// directly, without retaining the value,
+///
+/// On iOS and its derivatives, the nib-loading code will call
+/// -setValue:forKey:, which retains the value before directly setting the ivar.
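+///
+/// For example (hypothetical macOS code), no release requirement is imposed
+/// on an outlet such as:
+/// \code
+///   @property (assign) IBOutlet NSView *myView; // no custom setter declared
+/// \endcode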
+bool ObjCDeallocChecker::isNibLoadedIvarWithoutRetain(
+ const ObjCPropertyImplDecl *PropImpl) const {
+ const ObjCIvarDecl *IvarDecl = PropImpl->getPropertyIvarDecl();
+ if (!IvarDecl->hasAttr<IBOutletAttr>())
+ return false;
+
+ const llvm::Triple &Target =
+ IvarDecl->getASTContext().getTargetInfo().getTriple();
+
+ if (!Target.isMacOSX())
+ return false;
+
+ if (PropImpl->getPropertyDecl()->getSetterMethodDecl())
+ return false;
+
+ return true;
+}
+
void ento::registerObjCDeallocChecker(CheckerManager &Mgr) {
const LangOptions &LangOpts = Mgr.getLangOpts();
// This checker only makes sense under MRR.
diff --git a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
index 74d05e27e8eb..86764c939dcd 100644
--- a/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
+++ b/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -45,6 +45,7 @@ class CheckerDocumentation : public Checker< check::PreStmt<ReturnStmt>,
check::Location,
check::Bind,
check::DeadSymbols,
+ check::BeginFunction,
check::EndFunction,
check::EndAnalysis,
check::EndOfTranslationUnit,
diff --git a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
index 14587fb5163b..9e9939ae25c0 100644
--- a/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -19,7 +19,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
using namespace ento;
diff --git a/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
new file mode 100644
index 000000000000..6fa5732d10cb
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -0,0 +1,161 @@
+//===--- CloneChecker.cpp - Clone detection checker -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// CloneChecker is a checker that reports clones in the current translation
+/// unit.
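+///
+/// Its behavior can be tuned via the analyzer-config options read in
+/// checkEndOfTranslationUnit below: MinimumCloneComplexity,
+/// ReportSuspiciousClones and ReportNormalClones.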
+///
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Analysis/CloneDetection.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CloneChecker
+ : public Checker<check::ASTCodeBody, check::EndOfTranslationUnit> {
+ mutable CloneDetector Detector;
+ mutable std::unique_ptr<BugType> BT_Exact, BT_Suspicious;
+
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+
+ void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager &Mgr, BugReporter &BR) const;
+
+ /// \brief Reports all clones to the user.
+ void reportClones(BugReporter &BR, AnalysisManager &Mgr,
+ int MinComplexity) const;
+
+ /// \brief Reports only suspicious clones to the user along with information
+ /// that explains why they are suspicious.
+ void reportSuspiciousClones(BugReporter &BR, AnalysisManager &Mgr,
+ int MinComplexity) const;
+};
+} // end anonymous namespace
+
+void CloneChecker::checkASTCodeBody(const Decl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ // Every statement that should be included in the search for clones needs to
+ // be passed to the CloneDetector.
+ Detector.analyzeCodeBody(D);
+}
+
+void CloneChecker::checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ // At this point, every statement in the translation unit has been analyzed by
+ // the CloneDetector. The only thing left to do is to report the found clones.
+
+ int MinComplexity = Mgr.getAnalyzerOptions().getOptionAsInteger(
+ "MinimumCloneComplexity", 10, this);
+ assert(MinComplexity >= 0);
+
+ bool ReportSuspiciousClones = Mgr.getAnalyzerOptions().getBooleanOption(
+ "ReportSuspiciousClones", true, this);
+
+ bool ReportNormalClones = Mgr.getAnalyzerOptions().getBooleanOption(
+ "ReportNormalClones", true, this);
+
+ if (ReportSuspiciousClones)
+ reportSuspiciousClones(BR, Mgr, MinComplexity);
+
+ if (ReportNormalClones)
+ reportClones(BR, Mgr, MinComplexity);
+}
+
+static PathDiagnosticLocation makeLocation(const StmtSequence &S,
+ AnalysisManager &Mgr) {
+ ASTContext &ACtx = Mgr.getASTContext();
+ return PathDiagnosticLocation::createBegin(
+ S.front(), ACtx.getSourceManager(),
+ Mgr.getAnalysisDeclContext(ACtx.getTranslationUnitDecl()));
+}
+
+void CloneChecker::reportClones(BugReporter &BR, AnalysisManager &Mgr,
+ int MinComplexity) const {
+
+ std::vector<CloneDetector::CloneGroup> CloneGroups;
+ Detector.findClones(CloneGroups, MinComplexity);
+
+ if (!BT_Exact)
+ BT_Exact.reset(new BugType(this, "Exact code clone", "Code clone"));
+
+ for (CloneDetector::CloneGroup &Group : CloneGroups) {
+ // We group the clones by printing the first as a warning and all others
+ // as a note.
+ auto R = llvm::make_unique<BugReport>(
+ *BT_Exact, "Duplicate code detected",
+ makeLocation(Group.Sequences.front(), Mgr));
+ R->addRange(Group.Sequences.front().getSourceRange());
+
+ for (unsigned i = 1; i < Group.Sequences.size(); ++i)
+ R->addNote("Similar code here",
+ makeLocation(Group.Sequences[i], Mgr),
+ Group.Sequences[i].getSourceRange());
+ BR.emitReport(std::move(R));
+ }
+}
+
+void CloneChecker::reportSuspiciousClones(BugReporter &BR,
+ AnalysisManager &Mgr,
+ int MinComplexity) const {
+
+ std::vector<CloneDetector::SuspiciousClonePair> Clones;
+ Detector.findSuspiciousClones(Clones, MinComplexity);
+
+ if (!BT_Suspicious)
+ BT_Suspicious.reset(
+ new BugType(this, "Suspicious code clone", "Code clone"));
+
+ ASTContext &ACtx = BR.getContext();
+ SourceManager &SM = ACtx.getSourceManager();
+ AnalysisDeclContext *ADC =
+ Mgr.getAnalysisDeclContext(ACtx.getTranslationUnitDecl());
+
+ for (CloneDetector::SuspiciousClonePair &Pair : Clones) {
+ // FIXME: We are ignoring the suggestions currently, because they are
+ // only 50% accurate (even if the second suggestion is unavailable),
+ // which may confuse the user.
+ // Think about how to produce more accurate suggestions.
+
+ auto R = llvm::make_unique<BugReport>(
+ *BT_Suspicious,
+ "Potential copy-paste error; did you really mean to use '" +
+ Pair.FirstCloneInfo.Variable->getNameAsString() + "' here?",
+ PathDiagnosticLocation::createBegin(Pair.FirstCloneInfo.Mention, SM,
+ ADC));
+ R->addRange(Pair.FirstCloneInfo.Mention->getSourceRange());
+
+ R->addNote("Similar code using '" +
+ Pair.SecondCloneInfo.Variable->getNameAsString() + "' here",
+ PathDiagnosticLocation::createBegin(Pair.SecondCloneInfo.Mention,
+ SM, ADC),
+ Pair.SecondCloneInfo.Mention->getSourceRange());
+
+ BR.emitReport(std::move(R));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Register CloneChecker
+//===----------------------------------------------------------------------===//
+
+void ento::registerCloneChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<CloneChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
new file mode 100644
index 000000000000..2bb9e858731c
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/ConversionChecker.cpp
@@ -0,0 +1,192 @@
+//=== ConversionChecker.cpp -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Check that there is no loss of sign/precision in assignments, comparisons
+// and multiplications.
+//
+// ConversionChecker uses path sensitive analysis to determine possible values
+// of expressions. A warning is reported when:
+// * a negative value is implicitly converted to an unsigned value in an
+// assignment, comparison or multiplication.
+// * a value is assigned or used to initialize a variable whose source value
+//   is greater than the maximum value of the target type.
+//
+// Many compilers and tools have similar checks that are based on semantic
+// analysis. Those checks are sound but have poor precision. ConversionChecker
+// is an alternative to those checks.
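+//
+// A sketch of code this checker is meant to warn on (hypothetical example,
+// assuming the analyzer knows the values along the current path):
+//
+//   int i = -1;
+//   unsigned u = i;   // loss of sign in implicit conversion
+//   long big = 100000;
+//   short s = big;    // loss of precision in implicit conversion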
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ConversionChecker : public Checker<check::PreStmt<ImplicitCastExpr>> {
+public:
+ void checkPreStmt(const ImplicitCastExpr *Cast, CheckerContext &C) const;
+
+private:
+ mutable std::unique_ptr<BuiltinBug> BT;
+
+ // Is there loss of precision
+ bool isLossOfPrecision(const ImplicitCastExpr *Cast, CheckerContext &C) const;
+
+ // Is there loss of sign
+ bool isLossOfSign(const ImplicitCastExpr *Cast, CheckerContext &C) const;
+
+ void reportBug(ExplodedNode *N, CheckerContext &C, const char Msg[]) const;
+};
+}
+
+void ConversionChecker::checkPreStmt(const ImplicitCastExpr *Cast,
+ CheckerContext &C) const {
+ // TODO: For now we only warn about DeclRefExpr, to avoid noise. Warn also
+ // for the results of calculations.
+ if (!isa<DeclRefExpr>(Cast->IgnoreParenImpCasts()))
+ return;
+
+ // Don't warn for loss of sign/precision in macros.
+ if (Cast->getExprLoc().isMacroID())
+ return;
+
+ // Get Parent.
+ const ParentMap &PM = C.getLocationContext()->getParentMap();
+ const Stmt *Parent = PM.getParent(Cast);
+ if (!Parent)
+ return;
+
+ bool LossOfSign = false;
+ bool LossOfPrecision = false;
+
+ // Loss of sign/precision in binary operation.
+ if (const auto *B = dyn_cast<BinaryOperator>(Parent)) {
+ BinaryOperator::Opcode Opc = B->getOpcode();
+ if (Opc == BO_Assign || Opc == BO_AddAssign || Opc == BO_SubAssign ||
+ Opc == BO_MulAssign) {
+ LossOfSign = isLossOfSign(Cast, C);
+ LossOfPrecision = isLossOfPrecision(Cast, C);
+ } else if (B->isRelationalOp() || B->isMultiplicativeOp()) {
+ LossOfSign = isLossOfSign(Cast, C);
+ }
+ } else if (isa<DeclStmt>(Parent)) {
+ LossOfSign = isLossOfSign(Cast, C);
+ LossOfPrecision = isLossOfPrecision(Cast, C);
+ }
+
+ if (LossOfSign || LossOfPrecision) {
+ // Generate an error node.
+ ExplodedNode *N = C.generateNonFatalErrorNode(C.getState());
+ if (!N)
+ return;
+ if (LossOfSign)
+ reportBug(N, C, "Loss of sign in implicit conversion");
+ if (LossOfPrecision)
+ reportBug(N, C, "Loss of precision in implicit conversion");
+ }
+}
+
+void ConversionChecker::reportBug(ExplodedNode *N, CheckerContext &C,
+ const char Msg[]) const {
+ if (!BT)
+ BT.reset(
+ new BuiltinBug(this, "Conversion", "Possible loss of sign/precision."));
+
+ // Generate a report for this bug.
+ auto R = llvm::make_unique<BugReport>(*BT, Msg, N);
+ C.emitReport(std::move(R));
+}
+
+// Is the value of E greater than or equal to Val?
+static bool isGreaterEqual(CheckerContext &C, const Expr *E,
+ unsigned long long Val) {
+ ProgramStateRef State = C.getState();
+ SVal EVal = C.getSVal(E);
+ if (EVal.isUnknownOrUndef() || !EVal.getAs<NonLoc>())
+ return false;
+
+ SValBuilder &Bldr = C.getSValBuilder();
+ DefinedSVal V = Bldr.makeIntVal(Val, C.getASTContext().LongLongTy);
+
+ // Is EVal greater than or equal to V?
+ SVal GE = Bldr.evalBinOp(State, BO_GE, EVal, V, Bldr.getConditionType());
+ if (GE.isUnknownOrUndef())
+ return false;
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef StGE, StLT;
+ std::tie(StGE, StLT) = CM.assumeDual(State, GE.castAs<DefinedSVal>());
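+  // E is known to be >= Val only when the "greater or equal" assumption is
+  // feasible and the "less than" assumption is not.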
+ return StGE && !StLT;
+}
+
+// Is the value of E negative?
+static bool isNegative(CheckerContext &C, const Expr *E) {
+ ProgramStateRef State = C.getState();
+ SVal EVal = State->getSVal(E, C.getLocationContext());
+ if (EVal.isUnknownOrUndef() || !EVal.getAs<NonLoc>())
+ return false;
+ DefinedSVal DefinedEVal = EVal.castAs<DefinedSVal>();
+
+ SValBuilder &Bldr = C.getSValBuilder();
+ DefinedSVal V = Bldr.makeIntVal(0, false);
+
+ SVal LT =
+ Bldr.evalBinOp(State, BO_LT, DefinedEVal, V, Bldr.getConditionType());
+
+ // Is the negative assumption the only feasible one?
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef StNegative, StPositive;
+ std::tie(StNegative, StPositive) =
+ CM.assumeDual(State, LT.castAs<DefinedSVal>());
+
+ return StNegative && !StPositive;
+}
+
+bool ConversionChecker::isLossOfPrecision(const ImplicitCastExpr *Cast,
+ CheckerContext &C) const {
+ // Don't warn about explicit loss of precision.
+ if (Cast->isEvaluatable(C.getASTContext()))
+ return false;
+
+ QualType CastType = Cast->getType();
+ QualType SubType = Cast->IgnoreParenImpCasts()->getType();
+
+ if (!CastType->isIntegerType() || !SubType->isIntegerType())
+ return false;
+
+ if (C.getASTContext().getIntWidth(CastType) >=
+ C.getASTContext().getIntWidth(SubType))
+ return false;
+
+ unsigned W = C.getASTContext().getIntWidth(CastType);
+ if (W == 1 || W >= 64U)
+ return false;
+
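+  // A W-bit target can represent values up to 2^W - 1, so report when the
+  // source value may be greater than or equal to 2^W.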
+ unsigned long long MaxVal = 1ULL << W;
+ return isGreaterEqual(C, Cast->getSubExpr(), MaxVal);
+}
+
+bool ConversionChecker::isLossOfSign(const ImplicitCastExpr *Cast,
+ CheckerContext &C) const {
+ QualType CastType = Cast->getType();
+ QualType SubType = Cast->IgnoreParenImpCasts()->getType();
+
+ if (!CastType->isUnsignedIntegerType() || !SubType->isSignedIntegerType())
+ return false;
+
+ return isNegative(C, Cast->getSubExpr());
+}
+
+void ento::registerConversionChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ConversionChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
index 7e0cb8e93395..a37ebc506d04 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypeChecker.cpp
@@ -107,12 +107,7 @@ PathDiagnosticPiece *DynamicTypeChecker::DynamicTypeBugVisitor::VisitNode(
return nullptr;
// Retrieve the associated statement.
- const Stmt *S = nullptr;
- ProgramPoint ProgLoc = N->getLocation();
- if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
- S = SP->getStmt();
- }
-
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
if (!S)
return nullptr;
diff --git a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index b8e43325da04..a418c82f5a01 100644
--- a/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -626,7 +626,7 @@ static bool isObjCTypeParamDependent(QualType Type) {
: public RecursiveASTVisitor<IsObjCTypeParamDependentTypeVisitor> {
public:
IsObjCTypeParamDependentTypeVisitor() : Result(false) {}
- bool VisitTypedefType(const TypedefType *Type) {
+ bool VisitObjCTypeParamType(const ObjCTypeParamType *Type) {
if (isa<ObjCTypeParamDecl>(Type->getDecl())) {
Result = true;
return false;
@@ -727,6 +727,37 @@ void DynamicTypePropagation::checkPreObjCMessage(const ObjCMethodCall &M,
if (!Method)
return;
+ // If the method is declared on a class that has a non-invariant
+ // type parameter, don't warn about parameter mismatches after performing
+ // substitution. This prevents warning when the programmer has purposely
+ // casted the receiver to a super type or unspecialized type but the analyzer
+ // has a more precise tracked type than the programmer intends at the call
+ // site.
+ //
+ // For example, consider NSArray (which has a covariant type parameter)
+ // and NSMutableArray (a subclass of NSArray where the type parameter is
+ // invariant):
+// NSMutableArray *a = [[NSMutableArray<NSString *> alloc] init];
+ //
+ // [a containsObject:number]; // Safe: -containsObject is defined on NSArray.
+ // NSArray<NSObject *> *other = [a arrayByAddingObject:number] // Safe
+ //
+ // [a addObject:number] // Unsafe: -addObject: is defined on NSMutableArray
+ //
+
+ const ObjCInterfaceDecl *Interface = Method->getClassInterface();
+ if (!Interface)
+ return;
+
+ ObjCTypeParamList *TypeParams = Interface->getTypeParamList();
+ if (!TypeParams)
+ return;
+
+ for (ObjCTypeParamDecl *TypeParam : *TypeParams) {
+ if (TypeParam->getVariance() != ObjCTypeParamVariance::Invariant)
+ return;
+ }
+
Optional<ArrayRef<QualType>> TypeArgs =
(*TrackedType)->getObjCSubstitutions(Method->getDeclContext());
// This case might happen when there is an unspecialized override of a
@@ -909,12 +940,7 @@ PathDiagnosticPiece *DynamicTypePropagation::GenericsBugVisitor::VisitNode(
return nullptr;
// Retrieve the associated statement.
- const Stmt *S = nullptr;
- ProgramPoint ProgLoc = N->getLocation();
- if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
- S = SP->getStmt();
- }
-
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
if (!S)
return nullptr;
diff --git a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 31e9150cc15b..2d5cb60edf7d 100644
--- a/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -18,25 +18,41 @@ using namespace clang;
using namespace ento;
namespace {
-class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols> {
+class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
+ check::EndAnalysis> {
mutable std::unique_ptr<BugType> BT;
+ // These stats are per-analysis, not per-branch, hence they shouldn't
+ // stay inside the program state.
+ struct ReachedStat {
+ ExplodedNode *ExampleNode;
+ unsigned NumTimesReached;
+ };
+ mutable llvm::DenseMap<const CallExpr *, ReachedStat> ReachedStats;
+
void analyzerEval(const CallExpr *CE, CheckerContext &C) const;
void analyzerCheckInlined(const CallExpr *CE, CheckerContext &C) const;
void analyzerWarnIfReached(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerNumTimesReached(const CallExpr *CE, CheckerContext &C) const;
void analyzerCrash(const CallExpr *CE, CheckerContext &C) const;
void analyzerWarnOnDeadSymbol(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerDump(const CallExpr *CE, CheckerContext &C) const;
void analyzerExplain(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerPrintState(const CallExpr *CE, CheckerContext &C) const;
void analyzerGetExtent(const CallExpr *CE, CheckerContext &C) const;
typedef void (ExprInspectionChecker::*FnCheck)(const CallExpr *,
CheckerContext &C) const;
- void reportBug(llvm::StringRef Msg, CheckerContext &C) const;
+ ExplodedNode *reportBug(llvm::StringRef Msg, CheckerContext &C) const;
+ ExplodedNode *reportBug(llvm::StringRef Msg, BugReporter &BR,
+ ExplodedNode *N) const;
public:
bool evalCall(const CallExpr *CE, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng) const;
};
}
@@ -56,7 +72,12 @@ bool ExprInspectionChecker::evalCall(const CallExpr *CE,
.Case("clang_analyzer_warnOnDeadSymbol",
&ExprInspectionChecker::analyzerWarnOnDeadSymbol)
.Case("clang_analyzer_explain", &ExprInspectionChecker::analyzerExplain)
+ .Case("clang_analyzer_dump", &ExprInspectionChecker::analyzerDump)
.Case("clang_analyzer_getExtent", &ExprInspectionChecker::analyzerGetExtent)
+ .Case("clang_analyzer_printState",
+ &ExprInspectionChecker::analyzerPrintState)
+ .Case("clang_analyzer_numTimesReached",
+ &ExprInspectionChecker::analyzerNumTimesReached)
.Default(nullptr);
if (!Handler)
@@ -98,16 +119,24 @@ static const char *getArgumentValueString(const CallExpr *CE,
}
}
-void ExprInspectionChecker::reportBug(llvm::StringRef Msg,
- CheckerContext &C) const {
- if (!BT)
- BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
-
+ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
+ CheckerContext &C) const {
ExplodedNode *N = C.generateNonFatalErrorNode();
+ reportBug(Msg, C.getBugReporter(), N);
+ return N;
+}
+
+ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
+ BugReporter &BR,
+ ExplodedNode *N) const {
if (!N)
- return;
+ return nullptr;
+
+ if (!BT)
+ BT.reset(new BugType(this, "Checking analyzer assumptions", "debug"));
- C.emitReport(llvm::make_unique<BugReport>(*BT, Msg, N));
+ BR.emitReport(llvm::make_unique<BugReport>(*BT, Msg, N));
+ return N;
}
void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
@@ -127,6 +156,15 @@ void ExprInspectionChecker::analyzerWarnIfReached(const CallExpr *CE,
reportBug("REACHABLE", C);
}
+void ExprInspectionChecker::analyzerNumTimesReached(const CallExpr *CE,
+ CheckerContext &C) const {
+ ++ReachedStats[CE].NumTimesReached;
+ if (!ReachedStats[CE].ExampleNode) {
+ // Later, in checkEndAnalysis, we'll emit a report against it.
+ ReachedStats[CE].ExampleNode = C.generateNonFatalErrorNode();
+ }
+}
+
void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
CheckerContext &C) const {
const LocationContext *LC = C.getPredecessor()->getLocationContext();
@@ -144,22 +182,43 @@ void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
void ExprInspectionChecker::analyzerExplain(const CallExpr *CE,
CheckerContext &C) const {
- if (CE->getNumArgs() == 0)
+ if (CE->getNumArgs() == 0) {
reportBug("Missing argument for explaining", C);
+ return;
+ }
SVal V = C.getSVal(CE->getArg(0));
SValExplainer Ex(C.getASTContext());
reportBug(Ex.Visit(V), C);
}
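+
+// clang_analyzer_dump() reports the raw SVal of its argument as a warning.
+// Hypothetical test usage (the exact dump text depends on the SVal kind):
+//   int x = 1;
+//   clang_analyzer_dump(x); // expected-warning {{1 S32b}}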
+void ExprInspectionChecker::analyzerDump(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() == 0) {
+ reportBug("Missing argument for dumping", C);
+ return;
+ }
+
+ SVal V = C.getSVal(CE->getArg(0));
+
+ llvm::SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ V.dumpToStream(OS);
+ reportBug(OS.str(), C);
+}
+
void ExprInspectionChecker::analyzerGetExtent(const CallExpr *CE,
CheckerContext &C) const {
- if (CE->getNumArgs() == 0)
+ if (CE->getNumArgs() == 0) {
reportBug("Missing region for obtaining extent", C);
+ return;
+ }
auto MR = dyn_cast_or_null<SubRegion>(C.getSVal(CE->getArg(0)).getAsRegion());
- if (!MR)
+ if (!MR) {
reportBug("Obtaining extent of a non-region", C);
+ return;
+ }
ProgramStateRef State = C.getState();
State = State->BindExpr(CE, C.getLocationContext(),
@@ -167,6 +226,11 @@ void ExprInspectionChecker::analyzerGetExtent(const CallExpr *CE,
C.addTransition(State);
}
+void ExprInspectionChecker::analyzerPrintState(const CallExpr *CE,
+ CheckerContext &C) const {
+ C.getState()->dump();
+}
+
void ExprInspectionChecker::analyzerWarnOnDeadSymbol(const CallExpr *CE,
CheckerContext &C) const {
if (CE->getNumArgs() == 0)
@@ -185,15 +249,28 @@ void ExprInspectionChecker::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
const MarkedSymbolsTy &Syms = State->get<MarkedSymbols>();
+ ExplodedNode *N = C.getPredecessor();
for (auto I = Syms.begin(), E = Syms.end(); I != E; ++I) {
SymbolRef Sym = *I;
if (!SymReaper.isDead(Sym))
continue;
- reportBug("SYMBOL DEAD", C);
+ // The non-fatal error node should be the same for all reports.
+ if (ExplodedNode *BugNode = reportBug("SYMBOL DEAD", C))
+ N = BugNode;
State = State->remove<MarkedSymbols>(Sym);
}
- C.addTransition(State);
+ C.addTransition(State, N);
+}
+
+void ExprInspectionChecker::checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng) const {
+ for (auto Item : ReachedStats) {
+ unsigned NumTimesReached = Item.second.NumTimesReached;
+ ExplodedNode *N = Item.second.ExampleNode;
+
+ reportBug(std::to_string(NumTimesReached), BR, N);
+ }
}
void ExprInspectionChecker::analyzerCrash(const CallExpr *CE,
diff --git a/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
new file mode 100644
index 000000000000..f0be41b293e4
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -0,0 +1,299 @@
+//===-- GTestChecker.cpp - Model gtest API ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker models the behavior of un-inlined APIs from the gtest
+// unit-testing library to avoid false positives when using assertions from
+// that library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+// Modeling of un-inlined AssertionResult constructors
+//
+// The gtest unit testing API provides macros for assertions that expand
+// into an if statement that calls a series of constructors and returns
+// when the "assertion" is false.
+//
+// For example,
+//
+// ASSERT_TRUE(a == b)
+//
+// expands into:
+//
+// switch (0)
+// case 0:
+// default:
+// if (const ::testing::AssertionResult gtest_ar_ =
+// ::testing::AssertionResult((a == b)))
+// ;
+// else
+// return ::testing::internal::AssertHelper(
+// ::testing::TestPartResult::kFatalFailure,
+// "<path to project>",
+// <line number>,
+// ::testing::internal::GetBoolAssertionFailureMessage(
+// gtest_ar_, "a == b", "false", "true")
+// .c_str()) = ::testing::Message();
+//
+// where AssertionResult is defined similarly to
+//
+// class AssertionResult {
+// public:
+// AssertionResult(const AssertionResult& other);
+// explicit AssertionResult(bool success) : success_(success) {}
+// operator bool() const { return success_; }
+// ...
+// private:
+// bool success_;
+// };
+//
+// In order for the analyzer to correctly handle this assertion, it needs to
+// know that the boolean value of the expression "a == b" is stored in the
+// 'success_' field of the original AssertionResult temporary and propagated
+// (via the copy constructor) into the 'success_' field of the object stored
+// in 'gtest_ar_'. That boolean value will then be returned from the bool
+// conversion method in the if statement. This guarantees that the assertion
+// holds when the return path is not taken.
+//
+// If the success value is not properly propagated, then the eager case split
+// on evaluating the expression can cause pernicious false positives
+// on the non-return path:
+//
+// ASSERT(ptr != NULL)
+// *ptr = 7; // False positive null pointer dereference here
+//
+// Unfortunately, the bool constructor cannot be inlined (because its
+// implementation is not present in the headers) and the copy constructor is
+// not inlined (because it is constructed into a temporary and the analyzer
+// does not inline these since it does not yet reliably call temporary
+// destructors).
+//
+// This checker compensates for the missing inlining by propagating the
+// 'success_' value across the bool and copy constructors so the assertion
+// behaves as expected.
+// as expected.
+
+namespace {
+class GTestChecker : public Checker<check::PostCall> {
+
+ mutable IdentifierInfo *AssertionResultII;
+ mutable IdentifierInfo *SuccessII;
+
+public:
+ GTestChecker();
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ void modelAssertionResultBoolConstructor(const CXXConstructorCall *Call,
+ bool IsRef, CheckerContext &C) const;
+
+ void modelAssertionResultCopyConstructor(const CXXConstructorCall *Call,
+ CheckerContext &C) const;
+
+ void initIdentifierInfo(ASTContext &Ctx) const;
+
+ SVal
+ getAssertionResultSuccessFieldValue(const CXXRecordDecl *AssertionResultDecl,
+ SVal Instance,
+ ProgramStateRef State) const;
+
+ static ProgramStateRef assumeValuesEqual(SVal Val1, SVal Val2,
+ ProgramStateRef State,
+ CheckerContext &C);
+};
+} // End anonymous namespace.
+
+GTestChecker::GTestChecker() : AssertionResultII(nullptr), SuccessII(nullptr) {}
+
+/// Model a call to an un-inlined AssertionResult(bool) or
+/// AssertionResult(bool &, ...).
+/// To do so, constrain the value of the newly-constructed instance's 'success_'
+/// field to be equal to the passed-in boolean value.
+///
+/// \param IsRef Whether the boolean parameter is a reference or not.
+void GTestChecker::modelAssertionResultBoolConstructor(
+ const CXXConstructorCall *Call, bool IsRef, CheckerContext &C) const {
+ assert(Call->getNumArgs() >= 1 && Call->getNumArgs() <= 2);
+
+ ProgramStateRef State = C.getState();
+ SVal BooleanArgVal = Call->getArgSVal(0);
+ if (IsRef) {
+ // The argument is a reference, so load from it to get the boolean value.
+ if (!BooleanArgVal.getAs<Loc>())
+ return;
+ BooleanArgVal = C.getState()->getSVal(BooleanArgVal.castAs<Loc>());
+ }
+
+ SVal ThisVal = Call->getCXXThisVal();
+
+ SVal ThisSuccess = getAssertionResultSuccessFieldValue(
+ Call->getDecl()->getParent(), ThisVal, State);
+
+ State = assumeValuesEqual(ThisSuccess, BooleanArgVal, State, C);
+ C.addTransition(State);
+}
+
+/// Model a call to an un-inlined AssertionResult copy constructor:
+///
+///   AssertionResult(const AssertionResult &other)
+///
+/// To do so, constrain the value of the newly-constructed instance's
+/// 'success_' field to be equal to the value of the passed-in instance's
+/// 'success_' field.
+void GTestChecker::modelAssertionResultCopyConstructor(
+ const CXXConstructorCall *Call, CheckerContext &C) const {
+ assert(Call->getNumArgs() == 1);
+
+ // The first parameter of the copy constructor must be the other
+ // instance whose fields initialize this instance's fields.
+ SVal OtherVal = Call->getArgSVal(0);
+ SVal ThisVal = Call->getCXXThisVal();
+
+ const CXXRecordDecl *AssertResultClassDecl = Call->getDecl()->getParent();
+ ProgramStateRef State = C.getState();
+
+ SVal ThisSuccess = getAssertionResultSuccessFieldValue(AssertResultClassDecl,
+ ThisVal, State);
+ SVal OtherSuccess = getAssertionResultSuccessFieldValue(AssertResultClassDecl,
+ OtherVal, State);
+
+ State = assumeValuesEqual(ThisSuccess, OtherSuccess, State, C);
+ C.addTransition(State);
+}
+
+/// Model calls to AssertionResult constructors that are not inlined.
+void GTestChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ // If the constructor was inlined, there is no need to model it.
+ if (C.wasInlined)
+ return;
+
+ initIdentifierInfo(C.getASTContext());
+
+ auto *CtorCall = dyn_cast<CXXConstructorCall>(&Call);
+ if (!CtorCall)
+ return;
+
+ const CXXConstructorDecl *CtorDecl = CtorCall->getDecl();
+ const CXXRecordDecl *CtorParent = CtorDecl->getParent();
+ if (CtorParent->getIdentifier() != AssertionResultII)
+ return;
+
+ unsigned ParamCount = CtorDecl->getNumParams();
+
+ // Call the appropriate modeling method based on the parameters and their
+ // types.
+
+ // We have AssertionResult(const AssertionResult &)
+ if (CtorDecl->isCopyConstructor() && ParamCount == 1) {
+ modelAssertionResultCopyConstructor(CtorCall, C);
+ return;
+ }
+
+ // There are two possible boolean constructors, depending on which
+ // version of gtest is being used:
+ //
+ // v1.7 and earlier:
+ // AssertionResult(bool success)
+ //
+ // v1.8 and greater:
+ // template <typename T>
+ // AssertionResult(const T& success,
+ // typename internal::EnableIf<
+ // !internal::ImplicitlyConvertible<T,
+ // AssertionResult>::value>::type*)
+ //
+ CanQualType BoolTy = C.getASTContext().BoolTy;
+ if (ParamCount == 1 && CtorDecl->getParamDecl(0)->getType() == BoolTy) {
+ // We have AssertionResult(bool)
+ modelAssertionResultBoolConstructor(CtorCall, /*IsRef=*/false, C);
+ return;
+ }
+ if (ParamCount == 2) {
+ auto *RefTy = CtorDecl->getParamDecl(0)->getType()->getAs<ReferenceType>();
+ if (RefTy &&
+ RefTy->getPointeeType()->getCanonicalTypeUnqualified() == BoolTy) {
+ // We have AssertionResult(bool &, ...)
+ modelAssertionResultBoolConstructor(CtorCall, /*IsRef=*/true, C);
+ return;
+ }
+ }
+}
+
+void GTestChecker::initIdentifierInfo(ASTContext &Ctx) const {
+ if (AssertionResultII)
+ return;
+
+ AssertionResultII = &Ctx.Idents.get("AssertionResult");
+ SuccessII = &Ctx.Idents.get("success_");
+}
+
+/// Returns the value stored in the 'success_' field of the passed-in
+/// AssertionResult instance.
+SVal GTestChecker::getAssertionResultSuccessFieldValue(
+ const CXXRecordDecl *AssertionResultDecl, SVal Instance,
+ ProgramStateRef State) const {
+
+ DeclContext::lookup_result Result = AssertionResultDecl->lookup(SuccessII);
+ if (Result.empty())
+ return UnknownVal();
+
+ auto *SuccessField = dyn_cast<FieldDecl>(Result.front());
+ if (!SuccessField)
+ return UnknownVal();
+
+ Optional<Loc> FieldLoc =
+ State->getLValue(SuccessField, Instance).getAs<Loc>();
+ if (!FieldLoc.hasValue())
+ return UnknownVal();
+
+ return State->getSVal(*FieldLoc);
+}
+
+/// Constrain the passed-in state to assume two values are equal.
+ProgramStateRef GTestChecker::assumeValuesEqual(SVal Val1, SVal Val2,
+ ProgramStateRef State,
+ CheckerContext &C) {
+ if (!Val1.getAs<DefinedOrUnknownSVal>() ||
+ !Val2.getAs<DefinedOrUnknownSVal>())
+ return State;
+
+ auto ValuesEqual =
+ C.getSValBuilder().evalEQ(State, Val1.castAs<DefinedOrUnknownSVal>(),
+ Val2.castAs<DefinedOrUnknownSVal>());
+
+ if (!ValuesEqual.getAs<DefinedSVal>())
+ return State;
+
+ State = C.getConstraintManager().assume(
+ State, ValuesEqual.castAs<DefinedSVal>(), true);
+
+ return State;
+}
+
+void ento::registerGTestChecker(CheckerManager &Mgr) {
+ const LangOptions &LangOpts = Mgr.getLangOpts();
+ // gtest is a C++ API, so there is no point in running the checker
+ // unless we are compiling C++ code.
+ if (!LangOpts.CPlusPlus)
+ return;
+
+ Mgr.registerChecker<GTestChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index 7be2f574f0e9..d1dab6d27d45 100644
--- a/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -19,6 +19,9 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Lexer.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -26,11 +29,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "clang/Lex/Lexer.h"
-#include "clang/AST/RecursiveASTVisitor.h"
-#include "clang/AST/StmtVisitor.h"
#include "llvm/Support/Unicode.h"
-#include "llvm/ADT/StringSet.h"
using namespace clang;
using namespace ento;
@@ -189,6 +188,22 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
NEW_RECEIVER(NSButton)
ADD_UNARY_METHOD(NSButton, setTitle, 0)
ADD_UNARY_METHOD(NSButton, setAlternateTitle, 0)
+ IdentifierInfo *radioButtonWithTitleNSButton[] = {
+ &Ctx.Idents.get("radioButtonWithTitle"), &Ctx.Idents.get("target"),
+ &Ctx.Idents.get("action")};
+ ADD_METHOD(NSButton, radioButtonWithTitleNSButton, 3, 0)
+ IdentifierInfo *buttonWithTitleNSButtonImage[] = {
+ &Ctx.Idents.get("buttonWithTitle"), &Ctx.Idents.get("image"),
+ &Ctx.Idents.get("target"), &Ctx.Idents.get("action")};
+ ADD_METHOD(NSButton, buttonWithTitleNSButtonImage, 4, 0)
+ IdentifierInfo *checkboxWithTitleNSButton[] = {
+ &Ctx.Idents.get("checkboxWithTitle"), &Ctx.Idents.get("target"),
+ &Ctx.Idents.get("action")};
+ ADD_METHOD(NSButton, checkboxWithTitleNSButton, 3, 0)
+ IdentifierInfo *buttonWithTitleNSButtonTarget[] = {
+ &Ctx.Idents.get("buttonWithTitle"), &Ctx.Idents.get("target"),
+ &Ctx.Idents.get("action")};
+ ADD_METHOD(NSButton, buttonWithTitleNSButtonTarget, 3, 0)
NEW_RECEIVER(NSSavePanel)
ADD_UNARY_METHOD(NSSavePanel, setPrompt, 0)
@@ -271,6 +286,9 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(NSButtonCell, setTitle, 0)
ADD_UNARY_METHOD(NSButtonCell, setAlternateTitle, 0)
+ NEW_RECEIVER(NSDatePickerCell)
+ ADD_UNARY_METHOD(NSDatePickerCell, initTextCell, 0)
+
NEW_RECEIVER(NSSliderCell)
ADD_UNARY_METHOD(NSSliderCell, setTitle, 0)
@@ -336,9 +354,6 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_UNARY_METHOD(UIActionSheet, addButtonWithTitle, 0)
ADD_UNARY_METHOD(UIActionSheet, setTitle, 0)
- NEW_RECEIVER(NSURLSessionTask)
- ADD_UNARY_METHOD(NSURLSessionTask, setTaskDescription, 0)
-
NEW_RECEIVER(UIAccessibilityCustomAction)
IdentifierInfo *initWithNameUIAccessibilityCustomAction[] = {
&Ctx.Idents.get("initWithName"), &Ctx.Idents.get("target"),
@@ -363,6 +378,9 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
NEW_RECEIVER(NSTextField)
ADD_UNARY_METHOD(NSTextField, setPlaceholderString, 0)
+ ADD_UNARY_METHOD(NSTextField, textFieldWithString, 0)
+ ADD_UNARY_METHOD(NSTextField, wrappingLabelWithString, 0)
+ ADD_UNARY_METHOD(NSTextField, labelWithString, 0)
NEW_RECEIVER(NSAttributedString)
ADD_UNARY_METHOD(NSAttributedString, initWithString, 0)
@@ -523,9 +541,6 @@ void NonLocalizedStringChecker::initUIMethods(ASTContext &Ctx) const {
ADD_METHOD(NSUserNotificationAction,
actionWithIdentifierNSUserNotificationAction, 2, 1)
- NEW_RECEIVER(NSURLSession)
- ADD_UNARY_METHOD(NSURLSession, setSessionDescription, 0)
-
NEW_RECEIVER(UITextField)
ADD_UNARY_METHOD(UITextField, setText, 0)
ADD_UNARY_METHOD(UITextField, setPlaceholder, 0)
@@ -1001,6 +1016,8 @@ void EmptyLocalizationContextChecker::checkASTDecl(
void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
const ObjCMessageExpr *ME) {
+ // FIXME: We may be able to use PPCallbacks to check for empty context
+ // comments as part of preprocessing and avoid this re-lexing hack.
const ObjCInterfaceDecl *OD = ME->getReceiverInterface();
if (!OD)
return;
@@ -1035,7 +1052,12 @@ void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
SE = Mgr.getSourceManager().getSLocEntry(SLInfo.first);
}
- llvm::MemoryBuffer *BF = SE.getFile().getContentCache()->getRawBuffer();
+ bool Invalid = false;
+ llvm::MemoryBuffer *BF =
+ Mgr.getSourceManager().getBuffer(SLInfo.first, SL, &Invalid);
+ if (Invalid)
+ return;
+
Lexer TheLexer(SL, LangOptions(), BF->getBufferStart(),
BF->getBufferStart() + SLInfo.second, BF->getBufferEnd());
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
index 22fbf4c5b303..8474d2d194e8 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIBugReporter.h
@@ -46,9 +46,7 @@ public:
const ExplodedNode *const ExplNode,
BugReporter &BReporter) const;
- /// Report a missing wait for a nonblocking call. A missing wait report
- /// is emitted if a nonblocking call is not matched in the scope of a
- /// function.
+ /// Report a missing wait for a nonblocking call.
///
/// \param Req request that is not matched by a wait
/// \param RequestRegion memory region of the request
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index c3d0f8f2a129..c667b9e67d4b 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -43,7 +43,8 @@ void MPIChecker::checkDoubleNonblocking(const CallEvent &PreCallEvent,
// double nonblocking detected
if (Req && Req->CurrentState == Request::State::Nonblocking) {
ExplodedNode *ErrorNode = Ctx.generateNonFatalErrorNode();
- BReporter.reportDoubleNonblocking(PreCallEvent, *Req, MR, ErrorNode, Ctx.getBugReporter());
+ BReporter.reportDoubleNonblocking(PreCallEvent, *Req, MR, ErrorNode,
+ Ctx.getBugReporter());
Ctx.addTransition(ErrorNode->getState(), ErrorNode);
}
// no error
@@ -85,7 +86,8 @@ void MPIChecker::checkUnmatchedWaits(const CallEvent &PreCallEvent,
State = ErrorNode->getState();
}
// A wait has no matching nonblocking call.
- BReporter.reportUnmatchedWait(PreCallEvent, ReqRegion, ErrorNode, Ctx.getBugReporter());
+ BReporter.reportUnmatchedWait(PreCallEvent, ReqRegion, ErrorNode,
+ Ctx.getBugReporter());
}
}
@@ -118,7 +120,8 @@ void MPIChecker::checkMissingWaits(SymbolReaper &SymReaper,
ErrorNode = Ctx.generateNonFatalErrorNode(State, &Tag);
State = ErrorNode->getState();
}
- BReporter.reportMissingWait(Req.second, Req.first, ErrorNode, Ctx.getBugReporter());
+ BReporter.reportMissingWait(Req.second, Req.first, ErrorNode,
+ Ctx.getBugReporter());
}
State = State->remove<RequestMap>(Req.first);
}
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
index 20c60ad076a2..6b1c062ef3d5 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.h
@@ -19,8 +19,8 @@
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPICHECKER_H
#include "MPIBugReporter.h"
-#include "MPIFunctionClassifier.h"
#include "MPITypes.h"
+#include "clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
@@ -30,7 +30,7 @@ namespace mpi {
class MPIChecker : public Checker<check::PreCall, check::DeadSymbols> {
public:
- MPIChecker() : BReporter(*this) { }
+ MPIChecker() : BReporter(*this) {}
// path-sensitive callbacks
void checkPreCall(const CallEvent &CE, CheckerContext &Ctx) const {
@@ -49,7 +49,6 @@ public:
return;
const_cast<std::unique_ptr<MPIFunctionClassifier> &>(FuncClassifier)
.reset(new MPIFunctionClassifier{Ctx.getASTContext()});
-
}
/// Checks if a request is used by nonblocking calls multiple times
@@ -60,10 +59,9 @@ public:
void checkDoubleNonblocking(const clang::ento::CallEvent &PreCallEvent,
clang::ento::CheckerContext &Ctx) const;
- /// Checks if a request is used by a wait multiple times in sequence without
- /// intermediate nonblocking call or if the request used by the wait
- /// function was not used at all before. The check contains a guard,
- /// in order to only inspect wait functions.
+ /// Checks if the request used by the wait function was not used at all
+ /// before. The check contains a guard in order to inspect only wait
+ /// functions.
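+ ///
+ /// For example (hypothetical): calling MPI_Wait on a request that was never
+ /// passed to a nonblocking call such as MPI_Isend or MPI_Irecv is reported.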
///
/// \param PreCallEvent MPI call to verify
void checkUnmatchedWaits(const clang::ento::CallEvent &PreCallEvent,
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
index ad937f683d30..12760abaeeff 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.cpp
@@ -12,7 +12,7 @@
///
//===----------------------------------------------------------------------===//
-#include "MPIFunctionClassifier.h"
+#include "clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
deleted file mode 100644
index 65e908912c54..000000000000
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIFunctionClassifier.h
+++ /dev/null
@@ -1,97 +0,0 @@
-//===-- MPIFunctionClassifier.h - classifies MPI functions ----*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This file defines functionality to identify and classify MPI functions.
-///
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPIFUNCTIONCLASSIFIER_H
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-
-namespace clang {
-namespace ento {
-namespace mpi {
-
-class MPIFunctionClassifier {
-public:
- MPIFunctionClassifier(ASTContext &ASTCtx) { identifierInit(ASTCtx); }
-
- // general identifiers
- bool isMPIType(const IdentifierInfo *const IdentInfo) const;
- bool isNonBlockingType(const IdentifierInfo *const IdentInfo) const;
-
- // point-to-point identifiers
- bool isPointToPointType(const IdentifierInfo *const IdentInfo) const;
-
- // collective identifiers
- bool isCollectiveType(const IdentifierInfo *const IdentInfo) const;
- bool isCollToColl(const IdentifierInfo *const IdentInfo) const;
- bool isScatterType(const IdentifierInfo *const IdentInfo) const;
- bool isGatherType(const IdentifierInfo *const IdentInfo) const;
- bool isAllgatherType(const IdentifierInfo *const IdentInfo) const;
- bool isAlltoallType(const IdentifierInfo *const IdentInfo) const;
- bool isReduceType(const IdentifierInfo *const IdentInfo) const;
- bool isBcastType(const IdentifierInfo *const IdentInfo) const;
-
- // additional identifiers
- bool isMPI_Wait(const IdentifierInfo *const IdentInfo) const;
- bool isMPI_Waitall(const IdentifierInfo *const IdentInfo) const;
- bool isWaitType(const IdentifierInfo *const IdentInfo) const;
-
-private:
- // Initializes function identifiers, to recognize them during analysis.
- void identifierInit(ASTContext &ASTCtx);
- void initPointToPointIdentifiers(ASTContext &ASTCtx);
- void initCollectiveIdentifiers(ASTContext &ASTCtx);
- void initAdditionalIdentifiers(ASTContext &ASTCtx);
-
- // The containers are used, to enable classification of MPI-functions during
- // analysis.
- llvm::SmallVector<IdentifierInfo *, 12> MPINonBlockingTypes;
-
- llvm::SmallVector<IdentifierInfo *, 10> MPIPointToPointTypes;
- llvm::SmallVector<IdentifierInfo *, 16> MPICollectiveTypes;
-
- llvm::SmallVector<IdentifierInfo *, 4> MPIPointToCollTypes;
- llvm::SmallVector<IdentifierInfo *, 4> MPICollToPointTypes;
- llvm::SmallVector<IdentifierInfo *, 6> MPICollToCollTypes;
-
- llvm::SmallVector<IdentifierInfo *, 32> MPIType;
-
- // point-to-point functions
- IdentifierInfo *IdentInfo_MPI_Send = nullptr, *IdentInfo_MPI_Isend = nullptr,
- *IdentInfo_MPI_Ssend = nullptr, *IdentInfo_MPI_Issend = nullptr,
- *IdentInfo_MPI_Bsend = nullptr, *IdentInfo_MPI_Ibsend = nullptr,
- *IdentInfo_MPI_Rsend = nullptr, *IdentInfo_MPI_Irsend = nullptr,
- *IdentInfo_MPI_Recv = nullptr, *IdentInfo_MPI_Irecv = nullptr;
-
- // collective functions
- IdentifierInfo *IdentInfo_MPI_Scatter = nullptr,
- *IdentInfo_MPI_Iscatter = nullptr, *IdentInfo_MPI_Gather = nullptr,
- *IdentInfo_MPI_Igather = nullptr, *IdentInfo_MPI_Allgather = nullptr,
- *IdentInfo_MPI_Iallgather = nullptr, *IdentInfo_MPI_Bcast = nullptr,
- *IdentInfo_MPI_Ibcast = nullptr, *IdentInfo_MPI_Reduce = nullptr,
- *IdentInfo_MPI_Ireduce = nullptr, *IdentInfo_MPI_Allreduce = nullptr,
- *IdentInfo_MPI_Iallreduce = nullptr, *IdentInfo_MPI_Alltoall = nullptr,
- *IdentInfo_MPI_Ialltoall = nullptr, *IdentInfo_MPI_Barrier = nullptr;
-
- // additional functions
- IdentifierInfo *IdentInfo_MPI_Comm_rank = nullptr,
- *IdentInfo_MPI_Comm_size = nullptr, *IdentInfo_MPI_Wait = nullptr,
- *IdentInfo_MPI_Waitall = nullptr;
-};
-
-} // end of namespace: mpi
-} // end of namespace: ento
-} // end of namespace: clang
-
-#endif
diff --git a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
index 27ec950d31eb..2e7140cd771e 100644
--- a/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
+++ b/lib/StaticAnalyzer/Checkers/MPI-Checker/MPITypes.h
@@ -17,7 +17,7 @@
#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPITYPES_H
#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_MPICHECKER_MPITYPES_H
-#include "MPIFunctionClassifier.h"
+#include "clang/StaticAnalyzer/Checkers/MPIFunctionClassifier.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "llvm/ADT/SmallSet.h"
@@ -53,7 +53,6 @@ typedef llvm::ImmutableMap<const clang::ento::MemRegion *,
} // end of namespace: mpi
-
template <>
struct ProgramStateTrait<mpi::RequestMap>
: public ProgramStatePartialTrait<mpi::RequestMapImpl> {
diff --git a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
index 1e56d709e4f9..86c827045e9a 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -524,12 +524,7 @@ MacOSKeychainAPIChecker::generateAllocatedDataNotReleasedReport(
// allocated, and only report a single path.
PathDiagnosticLocation LocUsedForUniqueing;
const ExplodedNode *AllocNode = getAllocationNode(N, AP.first, C);
- const Stmt *AllocStmt = nullptr;
- ProgramPoint P = AllocNode->getLocation();
- if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
- AllocStmt = Exit->getCalleeContext()->getCallSite();
- else if (Optional<clang::PostStmt> PS = P.getAs<clang::PostStmt>())
- AllocStmt = PS->getStmt();
+ const Stmt *AllocStmt = PathDiagnosticLocation::getStmt(AllocNode);
if (AllocStmt)
LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
diff --git a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
index c038a2649e15..0e0f52af3165 100644
--- a/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -33,6 +33,8 @@ namespace {
class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
mutable std::unique_ptr<BugType> BT_dispatchOnce;
+ static const ObjCIvarRegion *getParentIvarRegion(const MemRegion *R);
+
public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
@@ -49,27 +51,34 @@ public:
// dispatch_once and dispatch_once_f
//===----------------------------------------------------------------------===//
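+// Walks up the super-region chain of R and returns the enclosing
+// Objective-C instance-variable region, if any.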
+const ObjCIvarRegion *
+MacOSXAPIChecker::getParentIvarRegion(const MemRegion *R) {
+ const SubRegion *SR = dyn_cast<SubRegion>(R);
+ while (SR) {
+ if (const ObjCIvarRegion *IR = dyn_cast<ObjCIvarRegion>(SR))
+ return IR;
+ SR = dyn_cast<SubRegion>(SR->getSuperRegion());
+ }
+ return nullptr;
+}
+
void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
StringRef FName) const {
if (CE->getNumArgs() < 1)
return;
- // Check if the first argument is stack allocated. If so, issue a warning
- // because that's likely to be bad news.
- ProgramStateRef state = C.getState();
- const MemRegion *R =
- state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
- if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ // Check if the first argument is improperly allocated. If so, issue a
+ // warning because that's likely to be bad news.
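+ //
+ // For example (hypothetical misuse this check flags):
+ //   dispatch_once_t pred;           // predicate with automatic storage
+ //   dispatch_once(&pred, ^{ ... }); // predicate should be static or global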
+ const MemRegion *R = C.getSVal(CE->getArg(0)).getAsRegion();
+ if (!R)
return;
- ExplodedNode *N = C.generateErrorNode(state);
- if (!N)
+ // Global variables are fine.
+ const MemRegion *RB = R->getBaseRegion();
+ const MemSpaceRegion *RS = RB->getMemorySpace();
+ if (isa<GlobalsSpaceRegion>(RS))
return;
- if (!BT_dispatchOnce)
- BT_dispatchOnce.reset(new BugType(this, "Improper use of 'dispatch_once'",
- "API Misuse (Apple)"));
-
// Handle _dispatch_once. In some versions of the OS X SDK we have the case
// that dispatch_once is a macro that wraps a call to _dispatch_once.
// _dispatch_once is then a function which then calls the real dispatch_once.
@@ -82,16 +91,48 @@ void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
SmallString<256> S;
llvm::raw_svector_ostream os(S);
+ bool SuggestStatic = false;
os << "Call to '" << FName << "' uses";
- if (const VarRegion *VR = dyn_cast<VarRegion>(R))
- os << " the local variable '" << VR->getDecl()->getName() << '\'';
- else
+ if (const VarRegion *VR = dyn_cast<VarRegion>(RB)) {
+ // We filtered out globals earlier, so it must be a local variable
+ // or a block variable which is under UnknownSpaceRegion.
+ if (VR != R)
+ os << " memory within";
+ if (VR->getDecl()->hasAttr<BlocksAttr>())
+ os << " the block variable '";
+ else
+ os << " the local variable '";
+ os << VR->getDecl()->getName() << '\'';
+ SuggestStatic = true;
+ } else if (const ObjCIvarRegion *IVR = getParentIvarRegion(R)) {
+ if (IVR != R)
+ os << " memory within";
+ os << " the instance variable '" << IVR->getDecl()->getName() << '\'';
+ } else if (isa<HeapSpaceRegion>(RS)) {
+ os << " heap-allocated memory";
+ } else if (isa<UnknownSpaceRegion>(RS)) {
+ // Presence of an IVar superregion has priority over this branch, because
+ // ObjC objects are on the heap even if the core doesn't realize this.
+ // Presence of a block variable base region has priority over this branch,
+ // because block variables are known to be either on stack or on heap
+ // (might actually move between the two, hence UnknownSpace).
+ return;
+ } else {
os << " stack allocated memory";
+ }
os << " for the predicate value. Using such transient memory for "
"the predicate is potentially dangerous.";
- if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ if (SuggestStatic)
os << " Perhaps you intended to declare the variable as 'static'?";
+ ExplodedNode *N = C.generateErrorNode();
+ if (!N)
+ return;
+
+ if (!BT_dispatchOnce)
+ BT_dispatchOnce.reset(new BugType(this, "Improper use of 'dispatch_once'",
+ "API Misuse (Apple)"));
+
auto report = llvm::make_unique<BugReport>(*BT_dispatchOnce, os.str(), N);
report->addRange(CE->getArg(0)->getSourceRange());
C.emitReport(std::move(report));
diff --git a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index e06662b16934..f7c4ea10c438 100644
--- a/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -26,7 +26,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
@@ -288,6 +287,9 @@ private:
ProgramStateRef State,
AllocationFamily Family = AF_Malloc);
+ static ProgramStateRef addExtentSize(CheckerContext &C, const CXXNewExpr *NE,
+ ProgramStateRef State);
+
// Check if this malloc() for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
llvm::Optional<ProgramStateRef>
@@ -776,6 +778,8 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
}
} else if (FunI == II_kmalloc) {
+ if (CE->getNumArgs() < 1)
+ return;
llvm::Optional<ProgramStateRef> MaybeState =
performKernelMalloc(CE, C, State);
if (MaybeState.hasValue())
@@ -805,6 +809,8 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
} else if (FunI == II_strndup) {
State = MallocUpdateRefState(C, CE, State);
} else if (FunI == II_alloca || FunI == II_win_alloca) {
+ if (CE->getNumArgs() < 1)
+ return;
State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State,
AF_Alloca);
State = ProcessZeroAllocation(C, CE, 0, State);
@@ -982,10 +988,58 @@ void MallocChecker::checkPostStmt(const CXXNewExpr *NE,
// existing binding.
State = MallocUpdateRefState(C, NE, State, NE->isArray() ? AF_CXXNewArray
: AF_CXXNew);
+ State = addExtentSize(C, NE, State);
State = ProcessZeroAllocation(C, NE, 0, State);
C.addTransition(State);
}
+// Sets the extent value of the MemRegion allocated by
+// the new-expression NE to its size in bytes.
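+//
+// For example (illustrative only): for 'new int[10]' on a target where
+// 'int' is 4 bytes, the element region's extent is constrained to 40 bytes.
+//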
+ProgramStateRef MallocChecker::addExtentSize(CheckerContext &C,
+ const CXXNewExpr *NE,
+ ProgramStateRef State) {
+ if (!State)
+ return nullptr;
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal ElementCount;
+ const LocationContext *LCtx = C.getLocationContext();
+ const SubRegion *Region;
+ if (NE->isArray()) {
+ const Expr *SizeExpr = NE->getArraySize();
+ ElementCount = State->getSVal(SizeExpr, C.getLocationContext());
+ // Store the extent size for the (symbolic) region
+ // containing the elements.
+ Region = (State->getSVal(NE, LCtx))
+ .getAsRegion()
+ ->getAs<SubRegion>()
+ ->getSuperRegion()
+ ->getAs<SubRegion>();
+ } else {
+ ElementCount = svalBuilder.makeIntVal(1, true);
+ Region = (State->getSVal(NE, LCtx)).getAsRegion()->getAs<SubRegion>();
+ }
+ assert(Region);
+
+ // Set the region's extent equal to the Size in Bytes.
+ QualType ElementType = NE->getAllocatedType();
+ ASTContext &AstContext = C.getASTContext();
+ CharUnits TypeSize = AstContext.getTypeSizeInChars(ElementType);
+
+ if (ElementCount.getAs<NonLoc>()) {
+ DefinedOrUnknownSVal Extent = Region->getExtent(svalBuilder);
+ // Size in bytes = ElementCount * TypeSize.
+ SVal SizeInBytes = svalBuilder.evalBinOpNN(
+ State, BO_Mul, ElementCount.castAs<NonLoc>(),
+ svalBuilder.makeArrayIndex(TypeSize.getQuantity()),
+ svalBuilder.getArrayIndexType());
+ DefinedOrUnknownSVal extentMatchesSize = svalBuilder.evalEQ(
+ State, Extent, SizeInBytes.castAs<DefinedOrUnknownSVal>());
+ State = State->assume(extentMatchesSize, true);
+ }
+ return State;
+}
+
void MallocChecker::checkPreStmt(const CXXDeleteExpr *DE,
CheckerContext &C) const {
@@ -2095,12 +2149,7 @@ void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
const MemRegion *Region = nullptr;
std::tie(AllocNode, Region) = getAllocationSite(N, Sym, C);
- ProgramPoint P = AllocNode->getLocation();
- const Stmt *AllocationStmt = nullptr;
- if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
- AllocationStmt = Exit->getCalleeContext()->getCallSite();
- else if (Optional<StmtPoint> SP = P.getAs<StmtPoint>())
- AllocationStmt = SP->getStmt();
+ const Stmt *AllocationStmt = PathDiagnosticLocation::getStmt(AllocNode);
if (AllocationStmt)
LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocationStmt,
C.getSourceManager(),
@@ -2529,6 +2578,11 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
return true;
}
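+ // Qt's QObject::connectImpl() takes ownership of the slot object passed to
+ // it, which it may later free. (Assumption about Qt's ownership model.)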
+ if (FName == "connectImpl" &&
+ FD->getQualifiedNameAsString() == "QObject::connectImpl") {
+ return true;
+ }
+
// Handle cases where we know a buffer's /address/ can escape.
// Note that the above checks handle some special cases where we know that
// even though the address escapes, it's still our responsibility to free the
@@ -2627,22 +2681,7 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
if (!RS)
return nullptr;
- const Stmt *S = nullptr;
- const char *Msg = nullptr;
- StackHintGeneratorForSymbol *StackHint = nullptr;
-
- // Retrieve the associated statement.
- ProgramPoint ProgLoc = N->getLocation();
- if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
- S = SP->getStmt();
- } else if (Optional<CallExitEnd> Exit = ProgLoc.getAs<CallExitEnd>()) {
- S = Exit->getCalleeContext()->getCallSite();
- } else if (Optional<BlockEdge> Edge = ProgLoc.getAs<BlockEdge>()) {
- // If an assumption was made on a branch, it should be caught
- // here by looking at the state transition.
- S = Edge->getSrc()->getTerminator();
- }
-
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
if (!S)
return nullptr;
@@ -2650,6 +2689,8 @@ MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
// (__attribute__((cleanup))).
// Find out if this is an interesting point and what is the kind.
+ const char *Msg = nullptr;
+ StackHintGeneratorForSymbol *StackHint = nullptr;
if (Mode == Normal) {
if (isAllocated(RS, RSPrev, S)) {
Msg = "Memory is allocated";
diff --git a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index d7ec6b10c6f7..d96017a1f532 100644
--- a/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -325,10 +325,7 @@ PathDiagnosticPiece *NullabilityChecker::NullabilityBugVisitor::VisitNode(
// Retrieve the associated statement.
const Stmt *S = TrackedNullab->getNullabilitySource();
if (!S) {
- ProgramPoint ProgLoc = N->getLocation();
- if (Optional<StmtPoint> SP = ProgLoc.getAs<StmtPoint>()) {
- S = SP->getStmt();
- }
+ S = PathDiagnosticLocation::getStmt(N);
}
if (!S)
@@ -336,7 +333,7 @@ PathDiagnosticPiece *NullabilityChecker::NullabilityBugVisitor::VisitNode(
std::string InfoText =
(llvm::Twine("Nullability '") +
- getNullabilityString(TrackedNullab->getValue()) + "' is infered")
+ getNullabilityString(TrackedNullab->getValue()) + "' is inferred")
.str();
// Generate the extra diagnostic.
@@ -613,9 +610,9 @@ void NullabilityChecker::checkPreStmt(const ReturnStmt *S,
SmallString<256> SBuf;
llvm::raw_svector_ostream OS(SBuf);
- OS << "Null is returned from a " << C.getDeclDescription(D) <<
+ OS << (RetExpr->getType()->isObjCObjectPointerType() ? "nil" : "Null");
+ OS << " returned from a " << C.getDeclDescription(D) <<
" that is expected to return a non-null value";
-
reportBugIfInvariantHolds(OS.str(),
ErrorKind::NilReturnedToNonnull, N, nullptr, C,
RetExpr);
@@ -682,9 +679,10 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
if (Param->isParameterPack())
break;
- const Expr *ArgExpr = nullptr;
- if (Idx < Call.getNumArgs())
- ArgExpr = Call.getArgExpr(Idx);
+ if (Idx >= Call.getNumArgs())
+ break;
+
+ const Expr *ArgExpr = Call.getArgExpr(Idx);
auto ArgSVal = Call.getArgSVal(Idx++).getAs<DefinedOrUnknownSVal>();
if (!ArgSVal)
continue;
@@ -709,9 +707,11 @@ void NullabilityChecker::checkPreCall(const CallEvent &Call,
ExplodedNode *N = C.generateErrorNode(State);
if (!N)
return;
+
SmallString<256> SBuf;
llvm::raw_svector_ostream OS(SBuf);
- OS << "Null passed to a callee that requires a non-null " << ParamIdx
+ OS << (Param->getType()->isObjCObjectPointerType() ? "nil" : "Null");
+ OS << " passed to a callee that requires a non-null " << ParamIdx
<< llvm::getOrdinalSuffix(ParamIdx) << " parameter";
reportBugIfInvariantHolds(OS.str(), ErrorKind::NilPassedToNonnull, N,
nullptr, C,
@@ -1130,8 +1130,11 @@ void NullabilityChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (ValueExpr)
ValueStmt = ValueExpr;
- reportBugIfInvariantHolds("Null is assigned to a pointer which is "
- "expected to have non-null value",
+ SmallString<256> SBuf;
+ llvm::raw_svector_ostream OS(SBuf);
+ OS << (LocType->isObjCObjectPointerType() ? "nil" : "Null");
+ OS << " assigned to a pointer which is expected to have non-null value";
+ reportBugIfInvariantHolds(OS.str(),
ErrorKind::NilAssignedToNonnull, N, nullptr, C,
ValueStmt);
return;
diff --git a/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
new file mode 100644
index 000000000000..40e379cb2efc
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -0,0 +1,348 @@
+//===- NumberObjectConversionChecker.cpp -------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines NumberObjectConversionChecker, which checks for a
+// particular common mistake when dealing with numbers represented as objects
+// passed around by pointers. Namely, the language allows reinterpreting the
+// pointer as a number directly, often without emitting any warnings, but in
+// most cases the result of such a conversion is clearly unexpected: the
+// pointer value, rather than the number represented by the pointee object,
+// becomes the result of the operation.
+//
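+// For instance (a hypothetical snippet this checker is meant to flag;
+// 'albumDescriptor' is an illustrative name):
+//   NSNumber *photoCount = [albumDescriptor objectForKey:@"PhotoCount"];
+//   if (photoCount > 0) { ... }  // compares the pointer, not the wrapped
+//                                // number; -integerValue was likely intended
+//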
+// Currently the checker supports the Objective-C NSNumber class,
+// and the OSBoolean class found in macOS low-level code; the latter
+// can only hold boolean values.
+//
+// This checker has a boolean option, "Pedantic", which enables detection of
+// more conversion patterns (these are more likely to be harmless, and
+// therefore more likely to produce false positives). It is disabled by
+// default and can be enabled with
+// `-analyzer-config osx.NumberObjectConversion:Pedantic=true'.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/APSInt.h"
+
+using namespace clang;
+using namespace ento;
+using namespace ast_matchers;
+
+namespace {
+
+class NumberObjectConversionChecker : public Checker<check::ASTCodeBody> {
+public:
+ bool Pedantic;
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager &AM,
+ BugReporter &BR) const;
+};
+
+class Callback : public MatchFinder::MatchCallback {
+ const NumberObjectConversionChecker *C;
+ BugReporter &BR;
+ AnalysisDeclContext *ADC;
+
+public:
+ Callback(const NumberObjectConversionChecker *C,
+ BugReporter &BR, AnalysisDeclContext *ADC)
+ : C(C), BR(BR), ADC(ADC) {}
+ virtual void run(const MatchFinder::MatchResult &Result);
+};
+} // end of anonymous namespace
+
+void Callback::run(const MatchFinder::MatchResult &Result) {
+ bool IsPedanticMatch =
+ (Result.Nodes.getNodeAs<Stmt>("pedantic") != nullptr);
+ if (IsPedanticMatch && !C->Pedantic)
+ return;
+
+ ASTContext &ACtx = ADC->getASTContext();
+
+ if (const Expr *CheckIfNull =
+ Result.Nodes.getNodeAs<Expr>("check_if_null")) {
+ // Unless the macro indicates that the intended type is clearly not
+ // a pointer type, we should avoid warning on comparing pointers
+ // to zero literals in non-pedantic mode.
+ // FIXME: Introduce an AST matcher to implement the macro-related logic?
+ bool MacroIndicatesWeShouldSkipTheCheck = false;
+ SourceLocation Loc = CheckIfNull->getLocStart();
+ if (Loc.isMacroID()) {
+ StringRef MacroName = Lexer::getImmediateMacroName(
+ Loc, ACtx.getSourceManager(), ACtx.getLangOpts());
+ if (MacroName == "NULL" || MacroName == "nil")
+ return;
+ if (MacroName == "YES" || MacroName == "NO")
+ MacroIndicatesWeShouldSkipTheCheck = true;
+ }
+ if (!MacroIndicatesWeShouldSkipTheCheck) {
+ llvm::APSInt Result;
+ if (CheckIfNull->IgnoreParenCasts()->EvaluateAsInt(
+ Result, ACtx, Expr::SE_AllowSideEffects)) {
+ if (Result == 0) {
+ if (!C->Pedantic)
+ return;
+ IsPedanticMatch = true;
+ }
+ }
+ }
+ }
+
+ const Stmt *Conv = Result.Nodes.getNodeAs<Stmt>("conv");
+ assert(Conv);
+
+ const Expr *ConvertedCObject = Result.Nodes.getNodeAs<Expr>("c_object");
+ const Expr *ConvertedCppObject = Result.Nodes.getNodeAs<Expr>("cpp_object");
+ const Expr *ConvertedObjCObject = Result.Nodes.getNodeAs<Expr>("objc_object");
+ bool IsCpp = (ConvertedCppObject != nullptr);
+ bool IsObjC = (ConvertedObjCObject != nullptr);
+ const Expr *Obj = IsObjC ? ConvertedObjCObject
+ : IsCpp ? ConvertedCppObject
+ : ConvertedCObject;
+ assert(Obj);
+
+ bool IsComparison =
+ (Result.Nodes.getNodeAs<Stmt>("comparison") != nullptr);
+
+ bool IsOSNumber =
+ (Result.Nodes.getNodeAs<Decl>("osnumber") != nullptr);
+
+ bool IsInteger =
+ (Result.Nodes.getNodeAs<QualType>("int_type") != nullptr);
+ bool IsObjCBool =
+ (Result.Nodes.getNodeAs<QualType>("objc_bool_type") != nullptr);
+ bool IsCppBool =
+ (Result.Nodes.getNodeAs<QualType>("cpp_bool_type") != nullptr);
+
+ llvm::SmallString<64> Msg;
+ llvm::raw_svector_ostream OS(Msg);
+
+ // Remove ObjC ARC qualifiers.
+ QualType ObjT = Obj->getType().getUnqualifiedType();
+
+ // Remove consts from pointers.
+ if (IsCpp) {
+ assert(ObjT.getCanonicalType()->isPointerType());
+ ObjT = ACtx.getPointerType(
+ ObjT->getPointeeType().getCanonicalType().getUnqualifiedType());
+ }
+
+ if (IsComparison)
+ OS << "Comparing ";
+ else
+ OS << "Converting ";
+
+ OS << "a pointer value of type '" << ObjT.getAsString() << "' to a ";
+
+ std::string EuphemismForPlain = "primitive";
+ std::string SuggestedApi = IsObjC ? (IsInteger ? "" : "-boolValue")
+ : IsCpp ? (IsOSNumber ? "" : "getValue()")
+ : "CFNumberGetValue()";
+ if (SuggestedApi.empty()) {
+ // A generic message if we're not sure what API should be called.
+ // FIXME: Pattern-match the integer type to make a better guess?
+ SuggestedApi =
+ "a method on '" + ObjT.getAsString() + "' to get the scalar value";
+ // "scalar" is not quite correct or common, but some documentation uses it
+ // when describing object methods we suggest. For consistency, we use
+ // "scalar" in the whole sentence when we need to use this word in at least
+ // one place; otherwise we use "primitive".
+ EuphemismForPlain = "scalar";
+ }
+
+ if (IsInteger)
+ OS << EuphemismForPlain << " integer value";
+ else if (IsObjCBool)
+ OS << EuphemismForPlain << " BOOL value";
+ else if (IsCppBool)
+ OS << EuphemismForPlain << " bool value";
+ else // Branch condition?
+ OS << EuphemismForPlain << " boolean value";
+
+ if (IsPedanticMatch)
+ OS << "; instead, either compare the pointer to "
+ << (IsObjC ? "nil" : IsCpp ? "nullptr" : "NULL") << " or ";
+ else
+ OS << "; did you mean to ";
+
+ if (IsComparison)
+ OS << "compare the result of calling " << SuggestedApi;
+ else
+ OS << "call " << SuggestedApi;
+
+ if (!IsPedanticMatch)
+ OS << "?";
+
+ BR.EmitBasicReport(
+ ADC->getDecl(), C, "Suspicious number object conversion", "Logic error",
+ OS.str(),
+ PathDiagnosticLocation::createBegin(Obj, BR.getSourceManager(), ADC),
+ Conv->getSourceRange());
+}
+
+void NumberObjectConversionChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
+ BugReporter &BR) const {
+ // Currently this matches CoreFoundation opaque pointer typedefs.
+ auto CSuspiciousNumberObjectExprM =
+ expr(ignoringParenImpCasts(
+ expr(hasType(
+ typedefType(hasDeclaration(anyOf(
+ typedefDecl(hasName("CFNumberRef")),
+ typedefDecl(hasName("CFBooleanRef")))))))
+ .bind("c_object")));
+
+ // Currently this matches XNU kernel number-object pointers.
+ auto CppSuspiciousNumberObjectExprM =
+ expr(ignoringParenImpCasts(
+ expr(hasType(hasCanonicalType(
+ pointerType(pointee(hasCanonicalType(
+ recordType(hasDeclaration(
+ anyOf(
+ cxxRecordDecl(hasName("OSBoolean")),
+ cxxRecordDecl(hasName("OSNumber"))
+ .bind("osnumber"))))))))))
+ .bind("cpp_object")));
+
+ // Currently this matches NeXTSTEP number objects.
+ auto ObjCSuspiciousNumberObjectExprM =
+ expr(ignoringParenImpCasts(
+ expr(hasType(hasCanonicalType(
+ objcObjectPointerType(pointee(
+ qualType(hasCanonicalType(
+ qualType(hasDeclaration(
+ objcInterfaceDecl(hasName("NSNumber")))))))))))
+ .bind("objc_object")));
+
+ auto SuspiciousNumberObjectExprM = anyOf(
+ CSuspiciousNumberObjectExprM,
+ CppSuspiciousNumberObjectExprM,
+ ObjCSuspiciousNumberObjectExprM);
+
+ // Useful for predicates like "Unless we've seen the same object elsewhere".
+ auto AnotherSuspiciousNumberObjectExprM =
+ expr(anyOf(
+ equalsBoundNode("c_object"),
+ equalsBoundNode("objc_object"),
+ equalsBoundNode("cpp_object")));
+
+ // The .bind here is in order to compose the error message more accurately.
+ auto ObjCSuspiciousScalarBooleanTypeM =
+ qualType(typedefType(hasDeclaration(
+ typedefDecl(hasName("BOOL"))))).bind("objc_bool_type");
+
+ // The .bind here is in order to compose the error message more accurately.
+ auto SuspiciousScalarBooleanTypeM =
+ qualType(anyOf(qualType(booleanType()).bind("cpp_bool_type"),
+ ObjCSuspiciousScalarBooleanTypeM));
+
+ // The .bind here is in order to compose the error message more accurately.
+ // Also avoid intptr_t and uintptr_t because they were specifically created
+ // for storing pointers.
+ auto SuspiciousScalarNumberTypeM =
+ qualType(hasCanonicalType(isInteger()),
+ unless(typedefType(hasDeclaration(
+ typedefDecl(matchesName("^::u?intptr_t$"))))))
+ .bind("int_type");
+
+ auto SuspiciousScalarTypeM =
+ qualType(anyOf(SuspiciousScalarBooleanTypeM,
+ SuspiciousScalarNumberTypeM));
+
+ auto SuspiciousScalarExprM =
+ expr(ignoringParenImpCasts(expr(hasType(SuspiciousScalarTypeM))));
+
+ auto ConversionThroughAssignmentM =
+ binaryOperator(allOf(hasOperatorName("="),
+ hasLHS(SuspiciousScalarExprM),
+ hasRHS(SuspiciousNumberObjectExprM)));
+
+ auto ConversionThroughBranchingM =
+ ifStmt(hasCondition(SuspiciousNumberObjectExprM))
+ .bind("pedantic");
+
+ auto ConversionThroughCallM =
+ callExpr(hasAnyArgument(allOf(hasType(SuspiciousScalarTypeM),
+ ignoringParenImpCasts(
+ SuspiciousNumberObjectExprM))));
+
+ // We bind "check_if_null" so that we can adjust the warning message when
+ // the code may have intended a null check via the relatively-ok constructs
+ // "x == 0" or "x != 0".
+ auto ConversionThroughEquivalenceM =
+ binaryOperator(allOf(anyOf(hasOperatorName("=="), hasOperatorName("!=")),
+ hasEitherOperand(SuspiciousNumberObjectExprM),
+ hasEitherOperand(SuspiciousScalarExprM
+ .bind("check_if_null"))))
+ .bind("comparison");
+
+ auto ConversionThroughComparisonM =
+ binaryOperator(allOf(anyOf(hasOperatorName(">="), hasOperatorName(">"),
+ hasOperatorName("<="), hasOperatorName("<")),
+ hasEitherOperand(SuspiciousNumberObjectExprM),
+ hasEitherOperand(SuspiciousScalarExprM)))
+ .bind("comparison");
+
+ auto ConversionThroughConditionalOperatorM =
+ conditionalOperator(allOf(
+ hasCondition(SuspiciousNumberObjectExprM),
+ unless(hasTrueExpression(
+ hasDescendant(AnotherSuspiciousNumberObjectExprM))),
+ unless(hasFalseExpression(
+ hasDescendant(AnotherSuspiciousNumberObjectExprM)))))
+ .bind("pedantic");
+
+ auto ConversionThroughExclamationMarkM =
+ unaryOperator(allOf(hasOperatorName("!"),
+ has(expr(SuspiciousNumberObjectExprM))))
+ .bind("pedantic");
+
+ auto ConversionThroughExplicitBooleanCastM =
+ explicitCastExpr(allOf(hasType(SuspiciousScalarBooleanTypeM),
+ has(expr(SuspiciousNumberObjectExprM))));
+
+ auto ConversionThroughExplicitNumberCastM =
+ explicitCastExpr(allOf(hasType(SuspiciousScalarNumberTypeM),
+ has(expr(SuspiciousNumberObjectExprM))));
+
+ auto ConversionThroughInitializerM =
+ declStmt(hasSingleDecl(
+ varDecl(hasType(SuspiciousScalarTypeM),
+ hasInitializer(SuspiciousNumberObjectExprM))));
+
+ auto FinalM = stmt(anyOf(ConversionThroughAssignmentM,
+ ConversionThroughBranchingM,
+ ConversionThroughCallM,
+ ConversionThroughComparisonM,
+ ConversionThroughConditionalOperatorM,
+ ConversionThroughEquivalenceM,
+ ConversionThroughExclamationMarkM,
+ ConversionThroughExplicitBooleanCastM,
+ ConversionThroughExplicitNumberCastM,
+ ConversionThroughInitializerM)).bind("conv");
+
+ MatchFinder F;
+ Callback CB(this, BR, AM.getAnalysisDeclContext(D));
+
+ F.addMatcher(stmt(forEachDescendant(FinalM)), &CB);
+ F.match(*D->getBody(), AM.getASTContext());
+}
+
+void ento::registerNumberObjectConversionChecker(CheckerManager &Mgr) {
+ NumberObjectConversionChecker *Chk =
+ Mgr.registerChecker<NumberObjectConversionChecker>();
+ Chk->Pedantic =
+ Mgr.getAnalyzerOptions().getBooleanOption("Pedantic", false, Chk);
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
new file mode 100644
index 000000000000..b9857e51f3ea
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp
@@ -0,0 +1,82 @@
+//==- ObjCPropertyChecker.cpp - Check ObjC properties ------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker finds issues with Objective-C properties.
+// Currently finds only one kind of issue:
+// - Finds synthesized properties that have the 'copy' attribute but a
+//   mutable NS collection type. Calling -copy on such a collection produces
+//   an immutable copy, which contradicts the declared type of the property.
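+//   For example (a hypothetical declaration this checker would flag):
+//     @property(copy) NSMutableArray *items; // setter stores an NSArray copy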
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCPropertyChecker
+ : public Checker<check::ASTDecl<ObjCPropertyDecl>> {
+ void checkCopyMutable(const ObjCPropertyDecl *D, BugReporter &BR) const;
+
+public:
+ void checkASTDecl(const ObjCPropertyDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+};
+} // end anonymous namespace.
+
+void ObjCPropertyChecker::checkASTDecl(const ObjCPropertyDecl *D,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {
+ checkCopyMutable(D, BR);
+}
+
+void ObjCPropertyChecker::checkCopyMutable(const ObjCPropertyDecl *D,
+ BugReporter &BR) const {
+ if (D->isReadOnly() || D->getSetterKind() != ObjCPropertyDecl::Copy)
+ return;
+
+ QualType T = D->getType();
+ if (!T->isObjCObjectPointerType())
+ return;
+
+ const std::string &PropTypeName(T->getPointeeType().getCanonicalType()
+ .getUnqualifiedType()
+ .getAsString());
+ if (!StringRef(PropTypeName).startswith("NSMutable"))
+ return;
+
+ const ObjCImplDecl *ImplD = nullptr;
+ if (const ObjCInterfaceDecl *IntD =
+ dyn_cast<ObjCInterfaceDecl>(D->getDeclContext())) {
+ ImplD = IntD->getImplementation();
+ } else {
+ const ObjCCategoryDecl *CatD = cast<ObjCCategoryDecl>(D->getDeclContext());
+ ImplD = CatD->getClassInterface()->getImplementation();
+ }
+
+ if (!ImplD || ImplD->HasUserDeclaredSetterMethod(D))
+ return;
+
+ SmallString<128> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "Property of mutable type '" << PropTypeName
+ << "' has 'copy' attribute; an immutable object will be stored instead";
+
+ BR.EmitBasicReport(
+ D, this, "Objective-C property misuse", "Logic error", OS.str(),
+ PathDiagnosticLocation::createBegin(D, BR.getSourceManager()),
+ D->getSourceRange());
+}
+
+void ento::registerObjCPropertyChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ObjCPropertyChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
index 15980c5c5387..e75d20897710 100644
--- a/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/ObjCSuperDeallocChecker.cpp
@@ -191,7 +191,7 @@ void ObjCSuperDeallocChecker::reportUseAfterDealloc(SymbolRef Sym,
return;
if (Desc.empty())
- Desc = "use of 'self' after it has been deallocated";
+ Desc = "Use of 'self' after it has been deallocated";
// Generate the report.
std::unique_ptr<BugReport> BR(
diff --git a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 0640d2f49f43..a51dda6fe858 100644
--- a/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -82,7 +82,11 @@ public:
CharUnits BaselinePad = calculateBaselinePad(RD, ASTContext, RL);
if (BaselinePad.isZero())
return;
- CharUnits OptimalPad = calculateOptimalPad(RD, ASTContext, RL);
+
+ CharUnits OptimalPad;
+ SmallVector<const FieldDecl *, 20> OptimalFieldsOrder;
+ std::tie(OptimalPad, OptimalFieldsOrder) =
+ calculateOptimalPad(RD, ASTContext, RL);
CharUnits DiffPad = PadMultiplier * (BaselinePad - OptimalPad);
if (DiffPad.getQuantity() <= AllowedPad) {
@@ -90,7 +94,7 @@ public:
// There is not enough excess padding to trigger a warning.
return;
}
- reportRecord(RD, BaselinePad, OptimalPad);
+ reportRecord(RD, BaselinePad, OptimalPad, OptimalFieldsOrder);
}
/// \brief Look for arrays of overly padded types. If the padding of the
@@ -199,22 +203,30 @@ public:
/// 7. Add tail padding by rounding the current offset up to the structure
/// alignment. Track the amount of padding added.
- static CharUnits calculateOptimalPad(const RecordDecl *RD,
- const ASTContext &ASTContext,
- const ASTRecordLayout &RL) {
- struct CharUnitPair {
+ static std::pair<CharUnits, SmallVector<const FieldDecl *, 20>>
+ calculateOptimalPad(const RecordDecl *RD, const ASTContext &ASTContext,
+ const ASTRecordLayout &RL) {
+ struct FieldInfo {
CharUnits Align;
CharUnits Size;
- bool operator<(const CharUnitPair &RHS) const {
+ const FieldDecl *Field;
+ bool operator<(const FieldInfo &RHS) const {
// Order from small alignments to large alignments,
// then large sizes to small sizes.
- return std::make_pair(Align, -Size) <
- std::make_pair(RHS.Align, -RHS.Size);
+ // then large field indices to small field indices.
+ return std::make_tuple(Align, -Size,
+ Field ? -static_cast<int>(Field->getFieldIndex())
+ : 0) <
+ std::make_tuple(
+ RHS.Align, -RHS.Size,
+ RHS.Field ? -static_cast<int>(RHS.Field->getFieldIndex())
+ : 0);
}
};
- SmallVector<CharUnitPair, 20> Fields;
+ SmallVector<FieldInfo, 20> Fields;
auto GatherSizesAndAlignments = [](const FieldDecl *FD) {
- CharUnitPair RetVal;
+ FieldInfo RetVal;
+ RetVal.Field = FD;
auto &Ctx = FD->getASTContext();
std::tie(RetVal.Size, RetVal.Align) =
Ctx.getTypeInfoInChars(FD->getType());
@@ -226,14 +238,13 @@ public:
std::transform(RD->field_begin(), RD->field_end(),
std::back_inserter(Fields), GatherSizesAndAlignments);
std::sort(Fields.begin(), Fields.end());
-
// This lets us skip over vptrs and non-virtual bases,
// so that we can just worry about the fields in our object.
// Note that this does cause us to miss some cases where we
// could pack more bytes in to a base class's tail padding.
CharUnits NewOffset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
CharUnits NewPad;
-
+ SmallVector<const FieldDecl *, 20> OptimalFieldsOrder;
while (!Fields.empty()) {
unsigned TrailingZeros =
llvm::countTrailingZeros((unsigned long long)NewOffset.getQuantity());
@@ -242,7 +253,7 @@ public:
// our long long (and CharUnits internal type) negative. So shift 62.
long long CurAlignmentBits = 1ull << (std::min)(TrailingZeros, 62u);
CharUnits CurAlignment = CharUnits::fromQuantity(CurAlignmentBits);
- CharUnitPair InsertPoint = {CurAlignment, CharUnits::Zero()};
+ FieldInfo InsertPoint = {CurAlignment, CharUnits::Zero(), nullptr};
auto CurBegin = Fields.begin();
auto CurEnd = Fields.end();
@@ -255,6 +266,7 @@ public:
// We found a field that we can layout with the current alignment.
--Iter;
NewOffset += Iter->Size;
+ OptimalFieldsOrder.push_back(Iter->Field);
Fields.erase(Iter);
} else {
// We are poorly aligned, and we need to pad in order to layout another
@@ -268,18 +280,18 @@ public:
// Calculate tail padding.
CharUnits NewSize = NewOffset.alignTo(RL.getAlignment());
NewPad += NewSize - NewOffset;
- return NewPad;
+ return {NewPad, std::move(OptimalFieldsOrder)};
}
- void reportRecord(const RecordDecl *RD, CharUnits BaselinePad,
- CharUnits TargetPad) const {
+ void reportRecord(
+ const RecordDecl *RD, CharUnits BaselinePad, CharUnits OptimalPad,
+ const SmallVector<const FieldDecl *, 20> &OptimalFieldsOrder) const {
if (!PaddingBug)
PaddingBug =
llvm::make_unique<BugType>(this, "Excessive Padding", "Performance");
SmallString<100> Buf;
llvm::raw_svector_ostream Os(Buf);
-
Os << "Excessive padding in '";
Os << QualType::getAsString(RD->getTypeForDecl(), Qualifiers()) << "'";
@@ -294,16 +306,18 @@ public:
}
Os << " (" << BaselinePad.getQuantity() << " padding bytes, where "
- << TargetPad.getQuantity() << " is optimal). Consider reordering "
- << "the fields or adding explicit padding members.";
+ << OptimalPad.getQuantity() << " is optimal). \n"
+ << "Optimal fields order: \n";
+ for (const auto *FD : OptimalFieldsOrder)
+ Os << FD->getName() << ", \n";
+ Os << "consider reordering the fields or adding explicit padding "
+ "members.";
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::create(RD, BR->getSourceManager());
-
auto Report = llvm::make_unique<BugReport>(*PaddingBug, Os.str(), CELoc);
Report->setDeclWithIssue(RD);
Report->addRange(RD->getSourceRange());
-
BR->emitReport(std::move(Report));
}
};
diff --git a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
index df5118806bff..8caf6df4d970 100644
--- a/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -19,7 +19,6 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "llvm/ADT/SmallVector.h"
using namespace clang;
using namespace ento;
diff --git a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 28a4a083ea3c..7ef79c683c49 100644
--- a/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -18,7 +18,6 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "llvm/ADT/ImmutableList.h"
using namespace clang;
using namespace ento;
diff --git a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
index b646127cfae7..204b0a6c468b 100644
--- a/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -740,7 +740,7 @@ public:
ObjCAllocRetE(gcenabled
? RetEffect::MakeGCNotOwned()
: (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
- : RetEffect::MakeOwned(RetEffect::ObjC, true))),
+ : RetEffect::MakeOwned(RetEffect::ObjC))),
ObjCInitRetE(gcenabled
? RetEffect::MakeGCNotOwned()
: (usesARC ? RetEffect::MakeNotOwned(RetEffect::ObjC)
@@ -953,7 +953,10 @@ void RetainSummaryManager::updateSummaryForCall(const RetainSummary *&S,
if (IdentifierInfo *Name = FC->getDecl()->getIdentifier()) {
// When the CGBitmapContext is deallocated, the callback here will free
// the associated data buffer.
- if (Name->isStr("CGBitmapContextCreateWithData"))
+ // The callback in dispatch_data_create frees the buffer, but not
+ // the data object.
+ if (Name->isStr("CGBitmapContextCreateWithData") ||
+ Name->isStr("dispatch_data_create"))
RE = S->getRetEffect();
}
}
@@ -1086,7 +1089,7 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
FName == "IOOpenFirmwarePathMatching") {
// Part of <rdar://problem/6961230>. (IOKit)
// This should be addressed using a API table.
- S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF),
DoNothing, DoNothing);
} else if (FName == "IOServiceGetMatchingService" ||
FName == "IOServiceGetMatchingServices") {
@@ -1116,7 +1119,7 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
// passed to CGBitmapContextCreateWithData is released via
// a callback and doing full IPA to make sure this is done correctly.
ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
- S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF),
DoNothing, DoNothing);
} else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
// FIXES: <rdar://problem/7283567>
@@ -1126,6 +1129,14 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
// correctly.
ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "VTCompressionSessionEncodeFrame") {
+ // The context argument passed to VTCompressionSessionEncodeFrame()
+ // is passed to the callback specified when creating the session
+ // (e.g. with VTCompressionSessionCreate()) which can release it.
+ // To account for this possibility, conservatively stop tracking
+ // the context.
+ ScratchArgs = AF.add(ScratchArgs, 5, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
} else if (FName == "dispatch_set_context" ||
FName == "xpc_connection_set_context") {
// <rdar://problem/11059275> - The analyzer currently doesn't have
@@ -1171,8 +1182,9 @@ RetainSummaryManager::getFunctionSummary(const FunctionDecl *FD) {
break;
}
- // For CoreGraphics ('CG') types.
- if (cocoa::isRefType(RetTy, "CG", FName)) {
+ // For CoreGraphics ('CG') and CoreVideo ('CV') types.
+ if (cocoa::isRefType(RetTy, "CG", FName) ||
+ cocoa::isRefType(RetTy, "CV", FName)) {
if (isRetain(FD, FName))
S = getUnarySummary(FT, cfretain);
else
@@ -1283,7 +1295,7 @@ const RetainSummary *
RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
assert (ScratchArgs.isEmpty());
- return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+ return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF));
}
const RetainSummary *
@@ -1313,7 +1325,7 @@ RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
}
if (D->hasAttr<CFReturnsRetainedAttr>())
- return RetEffect::MakeOwned(RetEffect::CF, true);
+ return RetEffect::MakeOwned(RetEffect::CF);
if (D->hasAttr<CFReturnsNotRetainedAttr>())
return RetEffect::MakeNotOwned(RetEffect::CF);
@@ -1426,7 +1438,7 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
case OMF_new:
case OMF_copy:
case OMF_mutableCopy:
- ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF);
break;
default:
ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
@@ -1448,7 +1460,7 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
if (cocoa::isCocoaObjectRef(RetTy))
ResultEff = ObjCAllocRetE;
else if (coreFoundation::isCFObjectRef(RetTy))
- ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF);
break;
case OMF_autorelease:
ReceiverEff = Autorelease;
@@ -1579,7 +1591,7 @@ void RetainSummaryManager::InitializeMethodSummaries() {
// The next methods are allocators.
const RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
const RetainSummary *CFAllocSumm =
- getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+ getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF));
// Create the "retain" selector.
RetEffect NoRet = RetEffect::MakeNoRet();
@@ -1978,11 +1990,23 @@ PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
}
if (CurrV.getObjKind() == RetEffect::CF) {
- os << " returns a Core Foundation object with a ";
+ if (Sym->getType().isNull()) {
+ os << " returns a Core Foundation object with a ";
+ } else {
+ os << " returns a Core Foundation object of type "
+ << Sym->getType().getAsString() << " with a ";
+ }
}
else {
assert (CurrV.getObjKind() == RetEffect::ObjC);
- os << " returns an Objective-C object with a ";
+ QualType T = Sym->getType();
+ if (T.isNull() || !isa<ObjCObjectPointerType>(T)) {
+ os << " returns an Objective-C object with a ";
+ } else {
+ const ObjCObjectPointerType *PT = cast<ObjCObjectPointerType>(T);
+ os << " returns an instance of "
+ << PT->getPointeeType().getAsString() << " with a ";
+ }
}
if (CurrV.isOwned()) {
@@ -2358,10 +2382,15 @@ CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << "that is annotated as NS_RETURNS_NOT_RETAINED";
else {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- os << "whose name ('" << MD->getSelector().getAsString()
- << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
- " This violates the naming convention rules"
- " given in the Memory Management Guide for Cocoa";
+ if (BRC.getASTContext().getLangOpts().ObjCAutoRefCount) {
+ os << "managed by Automatic Reference Counting";
+ } else {
+ os << "whose name ('" << MD->getSelector().getAsString()
+ << "') does not start with "
+ "'copy', 'mutableCopy', 'alloc' or 'new'."
+ " This violates the naming convention rules"
+ " given in the Memory Management Guide for Cocoa";
+ }
}
else {
const FunctionDecl *FD = cast<FunctionDecl>(D);
@@ -2417,12 +2446,7 @@ CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
// FIXME: This will crash the analyzer if an allocation comes from an
// implicit call (ex: a destructor call).
// (Currently there are no such allocations in Cocoa, though.)
- const Stmt *AllocStmt = nullptr;
- ProgramPoint P = AllocNode->getLocation();
- if (Optional<CallExitEnd> Exit = P.getAs<CallExitEnd>())
- AllocStmt = Exit->getCalleeContext()->getCallSite();
- else
- AllocStmt = P.castAs<PostStmt>().getStmt();
+ const Stmt *AllocStmt = PathDiagnosticLocation::getStmt(AllocNode);
assert(AllocStmt && "Cannot find allocation statement");
PathDiagnosticLocation AllocLocation =
@@ -2640,10 +2664,6 @@ public:
ArrayRef<const MemRegion *> Regions,
const CallEvent *Call) const;
- bool wantsRegionChangeUpdate(ProgramStateRef state) const {
- return true;
- }
-
void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
ExplodedNode *Pred, RetEffect RE, RefVal X,
@@ -3071,7 +3091,6 @@ void RetainCountChecker::checkSummary(const RetainSummary &Summ,
// No work necessary.
break;
- case RetEffect::OwnedAllocatedSymbol:
case RetEffect::OwnedSymbol: {
SymbolRef Sym = CallOrMsg.getReturnValue().getAsSymbol();
if (!Sym)
@@ -3372,12 +3391,13 @@ bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
// Handle: id NSMakeCollectable(CFTypeRef)
canEval = II->isStr("NSMakeCollectable");
} else if (ResultTy->isPointerType()) {
- // Handle: (CF|CG)Retain
+ // Handle: (CF|CG|CV)Retain
// CFAutorelease
// CFMakeCollectable
// It's okay to be a little sloppy here (CGMakeCollectable doesn't exist).
if (cocoa::isRefType(ResultTy, "CF", FName) ||
- cocoa::isRefType(ResultTy, "CG", FName)) {
+ cocoa::isRefType(ResultTy, "CG", FName) ||
+ cocoa::isRefType(ResultTy, "CV", FName)) {
canEval = isRetain(FD, FName) || isAutorelease(FD, FName) ||
isMakeCollectable(FD, FName);
}
@@ -3866,7 +3886,7 @@ void RetainCountChecker::checkEndFunction(CheckerContext &Ctx) const {
// Don't process anything within synthesized bodies.
const LocationContext *LCtx = Pred->getLocationContext();
if (LCtx->getAnalysisDeclContext()->isBodyAutosynthesized()) {
- assert(LCtx->getParent());
+ assert(!LCtx->inTopFrame());
return;
}
diff --git a/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
new file mode 100644
index 000000000000..93ad17cffb34
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -0,0 +1,1055 @@
+//=== StdLibraryFunctionsChecker.cpp - Model standard functions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker improves modeling of a few simple library functions.
+// It does not generate warnings.
+//
+// This checker provides a specification format - `FunctionSummaryTy' - and
+// contains descriptions of some library functions in this format. Each
+// specification contains a list of branches for splitting the program state
+// upon call, and range constraints on argument and return-value symbols that
+// are satisfied on each branch. This spec can be expanded to include more
+// items, like external effects of the function.
+//
+// The main difference between this approach and the body farms technique is
+// in more explicit control over how many branches are produced. For example,
+// consider standard C function `ispunct(int x)', which returns a non-zero value
+// iff `x' is a punctuation character, that is, when `x' is in range
+// ['!', '/'] U [':', '@'] U ['[', '\`'] U ['{', '~'].
+// `FunctionSummaryTy' provides only two branches for this function. However,
+// any attempt to describe this range with if-statements in the body farm
+// would result in many more branches. Because each branch needs to be analyzed
+// independently, this significantly reduces performance. Additionally,
+// once we consider a branch on which `x' is in range, say, ['!', '/'],
+// we assume that such a branch is an important separate path through the
+// program, which may lead to false positives, because this particular path
+// was never consciously intended, and it might in fact be unreachable.
+//
+// This checker uses eval::Call for modeling "pure" functions, for which
+// the `FunctionSummaryTy' is a precise model. This avoids unnecessary
+// invalidation passes. Conflicts with other checkers are unlikely because
+// if the function has no other effects, other checkers would probably never
+// want to improve upon the modeling done by this checker.
+//
+// Non-"pure" functions, for which only partial improvement over the default
+// behavior is expected, are modeled via check::PostCall, non-intrusively.
+//
+// The following standard C functions are currently supported:
+//
+// fgetc getline isdigit isupper
+// fread isalnum isgraph isxdigit
+// fwrite isalpha islower read
+// getc isascii isprint write
+// getchar isblank ispunct
+// getdelim iscntrl isspace
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace clang::ento;
+
+namespace {
+class StdLibraryFunctionsChecker : public Checker<check::PostCall, eval::Call> {
+ /// Below is a series of typedefs necessary to define function specs.
+ /// We avoid nesting types here because each additional qualifier
+ /// would need to be repeated in every function spec.
+ struct FunctionSummaryTy;
+
+  /// Specify how much the analyzer engine should entrust modeling this
+  /// function to us. If it doesn't, it performs additional invalidations.
+ enum InvalidationKindTy { NoEvalCall, EvalCallAsPure };
+
+  /// A pair of ValueRangeKindTy and IntRangeVectorTy describes a range
+ /// imposed on a particular argument or return value symbol.
+ ///
+ /// Given a range, should the argument stay inside or outside this range?
+ /// The special `ComparesToArgument' value indicates that we should
+ /// impose a constraint that involves other argument or return value symbols.
+ enum ValueRangeKindTy { OutOfRange, WithinRange, ComparesToArgument };
+
+ // The universal integral type to use in value range descriptions.
+ // Unsigned to make sure overflows are well-defined.
+ typedef uint64_t RangeIntTy;
+
+  /// Normally, describes a single range constraint, e.g. {{0, 1}, {3, 4}} is
+  /// a non-negative integer that is less than 5 and not equal to 2. For
+ /// `ComparesToArgument', holds information about how exactly to compare to
+ /// the argument.
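+  /// E.g. for `ComparesToArgument', {{BO_LE, 2}} encodes "compares as
+  /// less-or-equal to argument #2": the pair's first element holds the
+  /// comparison opcode, the second the other argument's number.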
+ typedef std::vector<std::pair<RangeIntTy, RangeIntTy>> IntRangeVectorTy;
+
+ /// A reference to an argument or return value by its number.
+  /// ArgNo in CallExpr and CallEvent is defined as `unsigned', but
+ /// obviously uint32_t should be enough for all practical purposes.
+ typedef uint32_t ArgNoTy;
+ static const ArgNoTy Ret = std::numeric_limits<ArgNoTy>::max();
+
+  /// Encapsulates a single range on a single symbol within a branch.
+ class ValueRange {
+ ArgNoTy ArgNo; // Argument to which we apply the range.
+ ValueRangeKindTy Kind; // Kind of range definition.
+ IntRangeVectorTy Args; // Polymorphic arguments.
+
+ public:
+ ValueRange(ArgNoTy ArgNo, ValueRangeKindTy Kind,
+ const IntRangeVectorTy &Args)
+ : ArgNo(ArgNo), Kind(Kind), Args(Args) {}
+
+ ArgNoTy getArgNo() const { return ArgNo; }
+ ValueRangeKindTy getKind() const { return Kind; }
+
+ BinaryOperator::Opcode getOpcode() const {
+ assert(Kind == ComparesToArgument);
+ assert(Args.size() == 1);
+ BinaryOperator::Opcode Op =
+ static_cast<BinaryOperator::Opcode>(Args[0].first);
+ assert(BinaryOperator::isComparisonOp(Op) &&
+ "Only comparison ops are supported for ComparesToArgument");
+ return Op;
+ }
+
+ ArgNoTy getOtherArgNo() const {
+ assert(Kind == ComparesToArgument);
+ assert(Args.size() == 1);
+ return static_cast<ArgNoTy>(Args[0].second);
+ }
+
+ const IntRangeVectorTy &getRanges() const {
+ assert(Kind != ComparesToArgument);
+ return Args;
+ }
+
+ // We avoid creating a virtual apply() method because
+ // it makes initializer lists harder to write.
+ private:
+ ProgramStateRef
+ applyAsOutOfRange(ProgramStateRef State, const CallEvent &Call,
+ const FunctionSummaryTy &Summary) const;
+ ProgramStateRef
+ applyAsWithinRange(ProgramStateRef State, const CallEvent &Call,
+ const FunctionSummaryTy &Summary) const;
+ ProgramStateRef
+ applyAsComparesToArgument(ProgramStateRef State, const CallEvent &Call,
+ const FunctionSummaryTy &Summary) const;
+
+ public:
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const FunctionSummaryTy &Summary) const {
+ switch (Kind) {
+ case OutOfRange:
+ return applyAsOutOfRange(State, Call, Summary);
+ case WithinRange:
+ return applyAsWithinRange(State, Call, Summary);
+ case ComparesToArgument:
+ return applyAsComparesToArgument(State, Call, Summary);
+ }
+ llvm_unreachable("Unknown ValueRange kind!");
+ }
+ };
+
+ /// The complete list of ranges that defines a single branch.
+ typedef std::vector<ValueRange> ValueRangeSet;
+
+ /// Includes information about function prototype (which is necessary to
+ /// ensure we're modeling the right function and casting values properly),
+ /// approach to invalidation, and a list of branches - essentially, a list
+  /// of lists of ranges - that is, a list of lists of lists of segments.
+ struct FunctionSummaryTy {
+ const std::vector<QualType> ArgTypes;
+ const QualType RetType;
+ const InvalidationKindTy InvalidationKind;
+ const std::vector<ValueRangeSet> Ranges;
+
+ private:
+ static void assertTypeSuitableForSummary(QualType T) {
+ assert(!T->isVoidType() &&
+ "We should have had no significant void types in the spec");
+ assert(T.isCanonical() &&
+ "We should only have canonical types in the spec");
+ // FIXME: lift this assert (but not the ones above!)
+ assert(T->isIntegralOrEnumerationType() &&
+ "We only support integral ranges in the spec");
+ }
+
+ public:
+ QualType getArgType(ArgNoTy ArgNo) const {
+ QualType T = (ArgNo == Ret) ? RetType : ArgTypes[ArgNo];
+ assertTypeSuitableForSummary(T);
+ return T;
+ }
+
+ /// Try our best to figure out if the call expression is the call of
+ /// *the* library function to which this specification applies.
+ bool matchesCall(const CallExpr *CE) const;
+ };
+
+ // The same function (as in, function identifier) may have different
+ // summaries assigned to it, with different argument and return value types.
+ // We call these "variants" of the function. This can be useful for handling
+ // C++ function overloads, and also it can be used when the same function
+ // may have different definitions on different platforms.
+ typedef std::vector<FunctionSummaryTy> FunctionVariantsTy;
+
+ // The map of all functions supported by the checker. It is initialized
+ // lazily, and it doesn't change after initialization.
+ typedef llvm::StringMap<FunctionVariantsTy> FunctionSummaryMapTy;
+ mutable FunctionSummaryMapTy FunctionSummaryMap;
+
+ // Auxiliary functions to support ArgNoTy within all structures
+ // in a unified manner.
+ static QualType getArgType(const FunctionSummaryTy &Summary, ArgNoTy ArgNo) {
+ return Summary.getArgType(ArgNo);
+ }
+ static QualType getArgType(const CallEvent &Call, ArgNoTy ArgNo) {
+ return ArgNo == Ret ? Call.getResultType().getCanonicalType()
+ : Call.getArgExpr(ArgNo)->getType().getCanonicalType();
+ }
+ static QualType getArgType(const CallExpr *CE, ArgNoTy ArgNo) {
+ return ArgNo == Ret ? CE->getType().getCanonicalType()
+ : CE->getArg(ArgNo)->getType().getCanonicalType();
+ }
+ static SVal getArgSVal(const CallEvent &Call, ArgNoTy ArgNo) {
+ return ArgNo == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgNo);
+ }
+
+public:
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ Optional<FunctionSummaryTy> findFunctionSummary(const FunctionDecl *FD,
+ const CallExpr *CE,
+ CheckerContext &C) const;
+
+ void initFunctionSummaries(BasicValueFactory &BVF) const;
+};
+} // end of anonymous namespace
+
+ProgramStateRef StdLibraryFunctionsChecker::ValueRange::applyAsOutOfRange(
+ ProgramStateRef State, const CallEvent &Call,
+ const FunctionSummaryTy &Summary) const {
+
+ ProgramStateManager &Mgr = State->getStateManager();
+ SValBuilder &SVB = Mgr.getSValBuilder();
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ ConstraintManager &CM = Mgr.getConstraintManager();
+ QualType T = getArgType(Summary, getArgNo());
+ SVal V = getArgSVal(Call, getArgNo());
+
+ if (auto N = V.getAs<NonLoc>()) {
+ const IntRangeVectorTy &R = getRanges();
+ size_t E = R.size();
+ for (size_t I = 0; I != E; ++I) {
+ const llvm::APSInt &Min = BVF.getValue(R[I].first, T);
+ const llvm::APSInt &Max = BVF.getValue(R[I].second, T);
+ assert(Min <= Max);
+ State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
+ if (!State)
+ break;
+ }
+ }
+
+ return State;
+}
+
+ProgramStateRef
+StdLibraryFunctionsChecker::ValueRange::applyAsWithinRange(
+ ProgramStateRef State, const CallEvent &Call,
+ const FunctionSummaryTy &Summary) const {
+
+ ProgramStateManager &Mgr = State->getStateManager();
+ SValBuilder &SVB = Mgr.getSValBuilder();
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ ConstraintManager &CM = Mgr.getConstraintManager();
+ QualType T = getArgType(Summary, getArgNo());
+ SVal V = getArgSVal(Call, getArgNo());
+
+ // "WithinRange R" is treated as "outside [T_MIN, T_MAX] \ R".
+ // We cut off [T_MIN, min(R) - 1] and [max(R) + 1, T_MAX] if necessary,
+ // and then cut away all holes in R one by one.
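+  // For example, for R = {[0, 9], [20, 29]} and a signed 32-bit T we assume,
+  // in turn, that the symbol is not in [INT_MIN, -1], not in [30, INT_MAX],
+  // and not in [10, 19].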
+ if (auto N = V.getAs<NonLoc>()) {
+ const IntRangeVectorTy &R = getRanges();
+ size_t E = R.size();
+
+ const llvm::APSInt &MinusInf = BVF.getMinValue(T);
+ const llvm::APSInt &PlusInf = BVF.getMaxValue(T);
+
+ const llvm::APSInt &Left = BVF.getValue(R[0].first - 1ULL, T);
+ if (Left != PlusInf) {
+ assert(MinusInf <= Left);
+ State = CM.assumeInclusiveRange(State, *N, MinusInf, Left, false);
+ if (!State)
+ return nullptr;
+ }
+
+ const llvm::APSInt &Right = BVF.getValue(R[E - 1].second + 1ULL, T);
+ if (Right != MinusInf) {
+ assert(Right <= PlusInf);
+ State = CM.assumeInclusiveRange(State, *N, Right, PlusInf, false);
+ if (!State)
+ return nullptr;
+ }
+
+ for (size_t I = 1; I != E; ++I) {
+ const llvm::APSInt &Min = BVF.getValue(R[I - 1].second + 1ULL, T);
+ const llvm::APSInt &Max = BVF.getValue(R[I].first - 1ULL, T);
+ assert(Min <= Max);
+ State = CM.assumeInclusiveRange(State, *N, Min, Max, false);
+ if (!State)
+ return nullptr;
+ }
+ }
+
+ return State;
+}
+
+ProgramStateRef
+StdLibraryFunctionsChecker::ValueRange::applyAsComparesToArgument(
+ ProgramStateRef State, const CallEvent &Call,
+ const FunctionSummaryTy &Summary) const {
+
+ ProgramStateManager &Mgr = State->getStateManager();
+ SValBuilder &SVB = Mgr.getSValBuilder();
+ QualType CondT = SVB.getConditionType();
+ QualType T = getArgType(Summary, getArgNo());
+ SVal V = getArgSVal(Call, getArgNo());
+
+ BinaryOperator::Opcode Op = getOpcode();
+ ArgNoTy OtherArg = getOtherArgNo();
+ SVal OtherV = getArgSVal(Call, OtherArg);
+ QualType OtherT = getArgType(Call, OtherArg);
+ // Note: we avoid integral promotion for comparison.
+ OtherV = SVB.evalCast(OtherV, T, OtherT);
+ if (auto CompV = SVB.evalBinOp(State, Op, V, OtherV, CondT)
+ .getAs<DefinedOrUnknownSVal>())
+ State = State->assume(*CompV, true);
+ return State;
+}
+
+void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!FD)
+ return;
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
+ if (!CE)
+ return;
+
+ Optional<FunctionSummaryTy> FoundSummary = findFunctionSummary(FD, CE, C);
+ if (!FoundSummary)
+ return;
+
+ // Now apply ranges.
+ const FunctionSummaryTy &Summary = *FoundSummary;
+ ProgramStateRef State = C.getState();
+
+ for (const auto &VRS: Summary.Ranges) {
+ ProgramStateRef NewState = State;
+ for (const auto &VR: VRS) {
+ NewState = VR.apply(NewState, Call, Summary);
+ if (!NewState)
+ break;
+ }
+
+ if (NewState && NewState != State)
+ C.addTransition(NewState);
+ }
+}
+
+bool StdLibraryFunctionsChecker::evalCall(const CallExpr *CE,
+ CheckerContext &C) const {
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CE->getCalleeDecl());
+ if (!FD)
+ return false;
+
+ Optional<FunctionSummaryTy> FoundSummary = findFunctionSummary(FD, CE, C);
+ if (!FoundSummary)
+ return false;
+
+ const FunctionSummaryTy &Summary = *FoundSummary;
+ switch (Summary.InvalidationKind) {
+ case EvalCallAsPure: {
+ ProgramStateRef State = C.getState();
+ const LocationContext *LC = C.getLocationContext();
+ SVal V = C.getSValBuilder().conjureSymbolVal(
+ CE, LC, CE->getType().getCanonicalType(), C.blockCount());
+ State = State->BindExpr(CE, LC, V);
+ C.addTransition(State);
+ return true;
+ }
+ case NoEvalCall:
+ // Summary tells us to avoid performing eval::Call. The function is possibly
+ // evaluated by another checker, or evaluated conservatively.
+ return false;
+ }
+ llvm_unreachable("Unknown invalidation kind!");
+}
+
+bool StdLibraryFunctionsChecker::FunctionSummaryTy::matchesCall(
+ const CallExpr *CE) const {
+ // Check number of arguments:
+ if (CE->getNumArgs() != ArgTypes.size())
+ return false;
+
+ // Check return type if relevant:
+ if (!RetType.isNull() && RetType != CE->getType().getCanonicalType())
+ return false;
+
+ // Check argument types when relevant:
+ for (size_t I = 0, E = ArgTypes.size(); I != E; ++I) {
+ QualType FormalT = ArgTypes[I];
+ // Null type marks irrelevant arguments.
+ if (FormalT.isNull())
+ continue;
+
+ assertTypeSuitableForSummary(FormalT);
+
+ QualType ActualT = StdLibraryFunctionsChecker::getArgType(CE, I);
+ assert(ActualT.isCanonical());
+ if (ActualT != FormalT)
+ return false;
+ }
+
+ return true;
+}
+
+Optional<StdLibraryFunctionsChecker::FunctionSummaryTy>
+StdLibraryFunctionsChecker::findFunctionSummary(const FunctionDecl *FD,
+ const CallExpr *CE,
+ CheckerContext &C) const {
+ // Note: we cannot always obtain FD from CE
+  // (e.g. virtual call, or call by pointer).
+ assert(CE);
+
+ if (!FD)
+ return None;
+
+ SValBuilder &SVB = C.getSValBuilder();
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+ initFunctionSummaries(BVF);
+
+ std::string Name = FD->getQualifiedNameAsString();
+ if (Name.empty() || !C.isCLibraryFunction(FD, Name))
+ return None;
+
+ auto FSMI = FunctionSummaryMap.find(Name);
+ if (FSMI == FunctionSummaryMap.end())
+ return None;
+
+ // Verify that function signature matches the spec in advance.
+ // Otherwise we might be modeling the wrong function.
+ // Strict checking is important because we will be conducting
+ // very integral-type-sensitive operations on arguments and
+ // return values.
+ const FunctionVariantsTy &SpecVariants = FSMI->second;
+ for (const FunctionSummaryTy &Spec : SpecVariants)
+ if (Spec.matchesCall(CE))
+ return Spec;
+
+ return None;
+}
+
+void StdLibraryFunctionsChecker::initFunctionSummaries(
+ BasicValueFactory &BVF) const {
+ if (!FunctionSummaryMap.empty())
+ return;
+
+ ASTContext &ACtx = BVF.getContext();
+
+  // These types are useful for writing specifications quickly.
+  // New specifications should probably introduce more types.
+  // Some types are hard to obtain from the AST, e.g. "ssize_t".
+ // In such cases it should be possible to provide multiple variants
+ // of function summary for common cases (eg. ssize_t could be int or long
+ // or long long, so three summary variants would be enough).
+ // Of course, function variants are also useful for C++ overloads.
+  QualType Irrelevant; // A placeholder whenever we do not care about the type.
+ QualType IntTy = ACtx.IntTy;
+ QualType LongTy = ACtx.LongTy;
+ QualType LongLongTy = ACtx.LongLongTy;
+ QualType SizeTy = ACtx.getSizeType();
+
+ RangeIntTy IntMax = BVF.getMaxValue(IntTy).getLimitedValue();
+ RangeIntTy LongMax = BVF.getMaxValue(LongTy).getLimitedValue();
+ RangeIntTy LongLongMax = BVF.getMaxValue(LongLongTy).getLimitedValue();
+
+ // We are finally ready to define specifications for all supported functions.
+ //
+ // The signature needs to have the correct number of arguments.
+ // However, we insert `Irrelevant' when the type is insignificant.
+ //
+ // Argument ranges should always cover all variants. If return value
+ // is completely unknown, omit it from the respective range set.
+ //
+ // All types in the spec need to be canonical.
+ //
+ // Every item in the list of range sets represents a particular
+ // execution path the analyzer would need to explore once
+ // the call is modeled - a new program state is constructed
+ // for every range set, and each range line in the range set
+ // corresponds to a specific constraint within this state.
+ //
+  // Upon comparing to another argument, the other argument is cast
+  // to the current argument's type. This avoids proper promotion but
+  // seems useful. For example, read() receives a size_t argument,
+ // and its return value, which is of type ssize_t, cannot be greater
+ // than this argument. If we made a promotion, and the size argument
+ // is equal to, say, 10, then we'd impose a range of [0, 10] on the
+ // return value, however the correct range is [-1, 10].
+ //
+ // Please update the list of functions in the header after editing!
+ //
+ // The format is as follows:
+ //
+ //{ "function name",
+ // { spec:
+ // { argument types list, ... },
+ // return type, purity, { range set list:
+ // { range list:
+ // { argument index, within or out of, {{from, to}, ...} },
+ // { argument index, compares to argument, {{how, which}} },
+ // ...
+ // }
+ // }
+ // }
+ //}
+
+#define SUMMARY_WITH_VARIANTS(identifier) {#identifier, {
+#define END_SUMMARY_WITH_VARIANTS }},
+#define VARIANT(argument_types, return_type, invalidation_approach) \
+ { argument_types, return_type, invalidation_approach, {
+#define END_VARIANT } },
+#define SUMMARY(identifier, argument_types, return_type, \
+ invalidation_approach) \
+ { #identifier, { { argument_types, return_type, invalidation_approach, {
+#define END_SUMMARY } } } },
+#define ARGUMENT_TYPES(...) { __VA_ARGS__ }
+#define RETURN_TYPE(x) x
+#define INVALIDATION_APPROACH(x) x
+#define CASE {
+#define END_CASE },
+#define ARGUMENT_CONDITION(argument_number, condition_kind) \
+ { argument_number, condition_kind, {
+#define END_ARGUMENT_CONDITION }},
+#define RETURN_VALUE_CONDITION(condition_kind) \
+ { Ret, condition_kind, {
+#define END_RETURN_VALUE_CONDITION }},
+#define ARG_NO(x) x##U
+#define RANGE(x, y) { x, y },
+#define SINGLE_VALUE(x) RANGE(x, x)
+#define IS_LESS_THAN(arg) { BO_LE, arg }
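+// Note that BO_LE encodes "less than or equal": read() and friends may
+// legitimately return exactly the size passed in the third argument.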
+
+ FunctionSummaryMap = {
+ // The isascii() family of functions.
+ SUMMARY(isalnum, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // Boils down to isupper() or islower() or isdigit()
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE('0', '9')
+ RANGE('A', 'Z')
+ RANGE('a', 'z')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE // The locale-specific range.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ // No post-condition. We are completely unaware of
+ // locale-specific return values.
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE('0', '9')
+ RANGE('A', 'Z')
+ RANGE('a', 'z')
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isalpha, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // isupper() or islower(). Note that 'Z' is less than 'a'.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE('A', 'Z')
+ RANGE('a', 'z')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE // The locale-specific range.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ END_CASE
+ CASE // Other.
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE('A', 'Z')
+ RANGE('a', 'z')
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isascii, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // Is ASCII.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(0, 127)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE(0, 127)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isblank, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ SINGLE_VALUE('\t')
+ SINGLE_VALUE(' ')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ SINGLE_VALUE('\t')
+ SINGLE_VALUE(' ')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(iscntrl, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // 0..31 or 127
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+          RANGE(0, 31)
+ SINGLE_VALUE(127)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+          RANGE(0, 31)
+ SINGLE_VALUE(127)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isdigit, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // Is a digit.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE('0', '9')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE('0', '9')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isgraph, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(33, 126)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE(33, 126)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(islower, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // Is certainly lowercase.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE('a', 'z')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE // Is ascii but not lowercase.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(0, 127)
+ END_ARGUMENT_CONDITION
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE('a', 'z')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE // The locale-specific range.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ END_CASE
+ CASE // Is not an unsigned char.
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE(0, 255)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isprint, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(32, 126)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE(32, 126)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(ispunct, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE('!', '/')
+ RANGE(':', '@')
+ RANGE('[', '`')
+ RANGE('{', '~')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE('!', '/')
+ RANGE(':', '@')
+ RANGE('[', '`')
+ RANGE('{', '~')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isspace, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // Space, '\f', '\n', '\r', '\t', '\v'.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(9, 13)
+ SINGLE_VALUE(' ')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE // The locale-specific range.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE(9, 13)
+ SINGLE_VALUE(' ')
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+    SUMMARY(isupper, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE // Is certainly uppercase.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE('A', 'Z')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE // The locale-specific range.
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ END_CASE
+ CASE // Other.
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+          RANGE('A', 'Z')
+          RANGE(128, 255)
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(isxdigit, ARGUMENT_TYPES(IntTy), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(EvalCallAsPure))
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), WithinRange)
+ RANGE('0', '9')
+ RANGE('A', 'F')
+ RANGE('a', 'f')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(OutOfRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ CASE
+ ARGUMENT_CONDITION(ARG_NO(0), OutOfRange)
+ RANGE('0', '9')
+ RANGE('A', 'F')
+ RANGE('a', 'f')
+ END_ARGUMENT_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(0)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+
+    // The getc() family of functions that returns either a char or EOF.
+ SUMMARY(getc, ARGUMENT_TYPES(Irrelevant), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(NoEvalCall))
+ CASE // FIXME: EOF is assumed to be defined as -1.
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, 255)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(fgetc, ARGUMENT_TYPES(Irrelevant), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(NoEvalCall))
+ CASE // FIXME: EOF is assumed to be defined as -1.
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, 255)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(getchar, ARGUMENT_TYPES(), RETURN_TYPE(IntTy),
+ INVALIDATION_APPROACH(NoEvalCall))
+ CASE // FIXME: EOF is assumed to be defined as -1.
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, 255)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+
+    // read()-like functions that never return more than the buffer size.
+ // We are not sure how ssize_t is defined on every platform, so we provide
+ // three variants that should cover common cases.
+ SUMMARY_WITH_VARIANTS(read)
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
+ RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, IntMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
+ RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, LongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
+ RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, LongLongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ END_SUMMARY_WITH_VARIANTS
+ SUMMARY_WITH_VARIANTS(write)
+      // Again, due to the elusive nature of ssize_t, we have to duplicate
+      // our summaries to cover different return-type variants.
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
+ RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, IntMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
+ RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, LongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy),
+ RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ RETURN_VALUE_CONDITION(WithinRange)
+ RANGE(-1, LongLongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ END_SUMMARY_WITH_VARIANTS
+ SUMMARY(fread,
+ ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy, Irrelevant),
+ RETURN_TYPE(SizeTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+ SUMMARY(fwrite,
+ ARGUMENT_TYPES(Irrelevant, Irrelevant, SizeTy, Irrelevant),
+ RETURN_TYPE(SizeTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(ComparesToArgument)
+ IS_LESS_THAN(ARG_NO(2))
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_SUMMARY
+
+ // getline()-like functions either fail or read at least the delimiter.
+ SUMMARY_WITH_VARIANTS(getline)
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
+ RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(-1)
+ RANGE(1, IntMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
+ RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(-1)
+ RANGE(1, LongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant),
+ RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(-1)
+ RANGE(1, LongLongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ END_SUMMARY_WITH_VARIANTS
+ SUMMARY_WITH_VARIANTS(getdelim)
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
+ RETURN_TYPE(IntTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(-1)
+ RANGE(1, IntMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
+ RETURN_TYPE(LongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(-1)
+ RANGE(1, LongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ VARIANT(ARGUMENT_TYPES(Irrelevant, Irrelevant, Irrelevant, Irrelevant),
+ RETURN_TYPE(LongLongTy), INVALIDATION_APPROACH(NoEvalCall))
+ CASE
+ RETURN_VALUE_CONDITION(WithinRange)
+ SINGLE_VALUE(-1)
+ RANGE(1, LongLongMax)
+ END_RETURN_VALUE_CONDITION
+ END_CASE
+ END_VARIANT
+ END_SUMMARY_WITH_VARIANTS
+ };
+}
+
+void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
+ // If this checker grows large enough to support C++, Objective-C, or other
+ // standard libraries, we could use multiple register...Checker() functions,
+ // which would register various checkers with the help of the same Checker
+ // class, turning on different function summaries.
+ mgr.registerChecker<StdLibraryFunctionsChecker>();
+}
diff --git a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 82b01fe814da..915514b42133 100644
--- a/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -19,7 +19,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
-#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
using namespace ento;
diff --git a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 4b78c2058341..26bf597bd950 100644
--- a/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"
#include <fcntl.h>
@@ -28,6 +29,16 @@
using namespace clang;
using namespace ento;
+enum class OpenVariant {
+ /// The standard open() call:
+ /// int open(const char *path, int oflag, ...);
+ Open,
+
+ /// The variant taking a directory file descriptor and a relative path:
+ /// int openat(int fd, const char *path, int oflag, ...);
+ OpenAt
+};
+
namespace {
class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
mutable std::unique_ptr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
@@ -37,17 +48,24 @@ public:
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
+ void CheckOpenAt(CheckerContext &C, const CallExpr *CE) const;
+
void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
void CheckReallocfZero(CheckerContext &C, const CallExpr *CE) const;
void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckAllocaWithAlignZero(CheckerContext &C, const CallExpr *CE) const;
void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
const CallExpr *) const;
private:
+
+ void CheckOpenVariant(CheckerContext &C,
+ const CallExpr *CE, OpenVariant Variant) const;
+
bool ReportZeroByteAllocation(CheckerContext &C,
ProgramStateRef falseState,
const Expr *arg,
@@ -89,25 +107,71 @@ void UnixAPIChecker::ReportOpenBug(CheckerContext &C,
}
void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
+ CheckOpenVariant(C, CE, OpenVariant::Open);
+}
+
+void UnixAPIChecker::CheckOpenAt(CheckerContext &C, const CallExpr *CE) const {
+ CheckOpenVariant(C, CE, OpenVariant::OpenAt);
+}
+
+void UnixAPIChecker::CheckOpenVariant(CheckerContext &C,
+ const CallExpr *CE,
+ OpenVariant Variant) const {
+  // The index of the argument taking the open flags (O_RDONLY, O_WRONLY,
+  // O_CREAT, etc.).
+ unsigned int FlagsArgIndex;
+ const char *VariantName;
+ switch (Variant) {
+ case OpenVariant::Open:
+ FlagsArgIndex = 1;
+ VariantName = "open";
+ break;
+ case OpenVariant::OpenAt:
+ FlagsArgIndex = 2;
+ VariantName = "openat";
+ break;
+  }
+
+ // All calls should at least provide arguments up to the 'flags' parameter.
+ unsigned int MinArgCount = FlagsArgIndex + 1;
+
+  // If the flags argument has O_CREAT set, then open()/openat() requires an
+  // additional argument specifying the file mode (permission bits) for the
+  // created file.
+ unsigned int CreateModeArgIndex = FlagsArgIndex + 1;
+
+ // The create mode argument should be the last argument.
+ unsigned int MaxArgCount = CreateModeArgIndex + 1;
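+
+  // For example, for openat() (FlagsArgIndex == 2):
+  //   openat(dirfd, path, O_WRONLY | O_CREAT, 0644);
+  // MinArgCount is 3, the mode argument lives at CreateModeArgIndex == 3,
+  // and MaxArgCount is 4.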
+
ProgramStateRef state = C.getState();
- if (CE->getNumArgs() < 2) {
+ if (CE->getNumArgs() < MinArgCount) {
// The frontend should issue a warning for this case, so this is a sanity
// check.
return;
- } else if (CE->getNumArgs() == 3) {
- const Expr *Arg = CE->getArg(2);
+ } else if (CE->getNumArgs() == MaxArgCount) {
+ const Expr *Arg = CE->getArg(CreateModeArgIndex);
QualType QT = Arg->getType();
if (!QT->isIntegerType()) {
+ SmallString<256> SBuf;
+ llvm::raw_svector_ostream OS(SBuf);
+ OS << "The " << CreateModeArgIndex + 1
+ << llvm::getOrdinalSuffix(CreateModeArgIndex + 1)
+ << " argument to '" << VariantName << "' is not an integer";
+
ReportOpenBug(C, state,
- "Third argument to 'open' is not an integer",
+ SBuf.c_str(),
Arg->getSourceRange());
return;
}
- } else if (CE->getNumArgs() > 3) {
+ } else if (CE->getNumArgs() > MaxArgCount) {
+ SmallString<256> SBuf;
+ llvm::raw_svector_ostream OS(SBuf);
+ OS << "Call to '" << VariantName << "' with more than " << MaxArgCount
+ << " arguments";
+
ReportOpenBug(C, state,
- "Call to 'open' with more than three arguments",
- CE->getArg(3)->getSourceRange());
+ SBuf.c_str(),
+ CE->getArg(MaxArgCount)->getSourceRange());
return;
}
@@ -127,7 +191,7 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
}
// Now check if oflags has O_CREAT set.
- const Expr *oflagsEx = CE->getArg(1);
+ const Expr *oflagsEx = CE->getArg(FlagsArgIndex);
const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
if (!V.getAs<NonLoc>()) {
// The case where 'V' can be a location can only be due to a bad header,
@@ -153,10 +217,15 @@ void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
if (!(trueState && !falseState))
return;
- if (CE->getNumArgs() < 3) {
+ if (CE->getNumArgs() < MaxArgCount) {
+ SmallString<256> SBuf;
+ llvm::raw_svector_ostream OS(SBuf);
+ OS << "Call to '" << VariantName << "' requires a "
+ << CreateModeArgIndex + 1
+ << llvm::getOrdinalSuffix(CreateModeArgIndex + 1)
+ << " argument when the 'O_CREAT' flag is set";
ReportOpenBug(C, trueState,
- "Call to 'open' requires a third argument when "
- "the 'O_CREAT' flag is set",
+ SBuf.c_str(),
oflagsEx->getSourceRange());
}
}
@@ -337,6 +406,11 @@ void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
BasicAllocationCheck(C, CE, 1, 0, "alloca");
}
+void UnixAPIChecker::CheckAllocaWithAlignZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 2, 0, "__builtin_alloca_with_align");
+}
+
void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
const CallExpr *CE) const {
BasicAllocationCheck(C, CE, 1, 0, "valloc");
@@ -353,6 +427,12 @@ void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
if (!FD || FD->getKind() != Decl::Function)
return;
+  // Don't treat functions in namespaces with the same name as a Unix function
+  // as a call to the Unix function.
+ const DeclContext *NamespaceCtx = FD->getEnclosingNamespaceContext();
+ if (NamespaceCtx && isa<NamespaceDecl>(NamespaceCtx))
+ return;
+
StringRef FName = C.getCalleeName(FD);
if (FName.empty())
return;
@@ -360,12 +440,15 @@ void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
SubChecker SC =
llvm::StringSwitch<SubChecker>(FName)
.Case("open", &UnixAPIChecker::CheckOpen)
+ .Case("openat", &UnixAPIChecker::CheckOpenAt)
.Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
.Case("calloc", &UnixAPIChecker::CheckCallocZero)
.Case("malloc", &UnixAPIChecker::CheckMallocZero)
.Case("realloc", &UnixAPIChecker::CheckReallocZero)
.Case("reallocf", &UnixAPIChecker::CheckReallocfZero)
.Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero)
+ .Case("__builtin_alloca_with_align",
+ &UnixAPIChecker::CheckAllocaWithAlignZero)
.Case("valloc", &UnixAPIChecker::CheckVallocZero)
.Default(nullptr);
diff --git a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 892e713d241f..ccd8e9a18b00 100644
--- a/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -147,6 +147,14 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
PathDiagnosticLocation DL;
SourceLocation SL;
if (const Stmt *S = getUnreachableStmt(CB)) {
+ // In macros, 'do {...} while (0)' is often used. Don't warn about the
+ // condition 0 when it is unreachable.
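+      // For instance, in `#define SWAP(a, b) do { ... } while (0)' the
+      // literal `0' is unreachable in every expansion of the macro.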
+ if (S->getLocStart().isMacroID())
+ if (const auto *I = dyn_cast<IntegerLiteral>(S))
+ if (I->getValue() == 0ULL)
+ if (const Stmt *Parent = PM->getParent(S))
+ if (isa<DoStmt>(Parent))
+ continue;
SR = S->getSourceRange();
DL = PathDiagnosticLocation::createBegin(S, B.getSourceManager(), LC);
SL = DL.asLocation();
@@ -191,8 +199,10 @@ void UnreachableCodeChecker::FindUnreachableEntryPoints(const CFGBlock *CB,
// Find the Stmt* in a CFGBlock for reporting a warning
const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
for (CFGBlock::const_iterator I = CB->begin(), E = CB->end(); I != E; ++I) {
- if (Optional<CFGStmt> S = I->getAs<CFGStmt>())
- return S->getStmt();
+ if (Optional<CFGStmt> S = I->getAs<CFGStmt>()) {
+ if (!isa<DeclStmt>(S->getStmt()))
+ return S->getStmt();
+ }
}
if (const Stmt *S = CB->getTerminator())
return S;
diff --git a/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
new file mode 100644
index 000000000000..b4bfa0c03341
--- /dev/null
+++ b/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -0,0 +1,373 @@
+//== ValistChecker.cpp - stdarg.h macro usage checker -----------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines checkers which detect usage of uninitialized va_list values
+// and va_start calls with no matching va_end.
+//
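+// For example (a sketch of the patterns being flagged):
+//
+//   void f(int n, ...) {
+//     va_list ap;
+//     int x = va_arg(ap, int); // uninitialized use: va_start() not called
+//     va_start(ap, n);
+//   }                          // unterminated: missing va_end(ap)
+//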
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+REGISTER_SET_WITH_PROGRAMSTATE(InitializedVALists, const MemRegion *)
+
+namespace {
+typedef SmallVector<const MemRegion *, 2> RegionVector;
+
+class ValistChecker : public Checker<check::PreCall, check::PreStmt<VAArgExpr>,
+ check::DeadSymbols> {
+ mutable std::unique_ptr<BugType> BT_leakedvalist, BT_uninitaccess;
+
+ struct VAListAccepter {
+ CallDescription Func;
+ int VAListPos;
+ };
+ static const SmallVector<VAListAccepter, 15> VAListAccepters;
+ static const CallDescription VaStart, VaEnd, VaCopy;
+
+public:
+ enum CheckKind {
+ CK_Uninitialized,
+ CK_Unterminated,
+ CK_CopyToSelf,
+ CK_NumCheckKinds
+ };
+
+ DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ CheckName CheckNames[CK_NumCheckKinds];
+
+ void checkPreStmt(const VAArgExpr *VAA, CheckerContext &C) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+
+private:
+ const MemRegion *getVAListAsRegion(SVal SV, CheckerContext &C) const;
+ StringRef getVariableNameFromRegion(const MemRegion *Reg) const;
+ const ExplodedNode *getStartCallSite(const ExplodedNode *N,
+ const MemRegion *Reg,
+ CheckerContext &C) const;
+
+ void reportUninitializedAccess(const MemRegion *VAList, StringRef Msg,
+ CheckerContext &C) const;
+ void reportLeakedVALists(const RegionVector &LeakedVALists, StringRef Msg1,
+ StringRef Msg2, CheckerContext &C, ExplodedNode *N,
+ bool ForceReport = false) const;
+
+ void checkVAListStartCall(const CallEvent &Call, CheckerContext &C,
+ bool IsCopy) const;
+ void checkVAListEndCall(const CallEvent &Call, CheckerContext &C) const;
+
+ class ValistBugVisitor : public BugReporterVisitorImpl<ValistBugVisitor> {
+ public:
+ ValistBugVisitor(const MemRegion *Reg, bool IsLeak = false)
+ : Reg(Reg), IsLeak(IsLeak) {}
+ void Profile(llvm::FoldingSetNodeID &ID) const override {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Reg);
+ }
+ std::unique_ptr<PathDiagnosticPiece>
+ getEndPath(BugReporterContext &BRC, const ExplodedNode *EndPathNode,
+ BugReport &BR) override {
+ if (!IsLeak)
+ return nullptr;
+
+ PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(
+ EndPathNode, BRC.getSourceManager());
+ // Do not add the statement itself as a range in case of leak.
+ return llvm::make_unique<PathDiagnosticEventPiece>(L, BR.getDescription(),
+ false);
+ }
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) override;
+
+ private:
+ const MemRegion *Reg;
+ bool IsLeak;
+ };
+};
+
+const SmallVector<ValistChecker::VAListAccepter, 15>
+ ValistChecker::VAListAccepters = {
+ {{"vfprintf", 3}, 2},
+ {{"vfscanf", 3}, 2},
+ {{"vprintf", 2}, 1},
+ {{"vscanf", 2}, 1},
+ {{"vsnprintf", 4}, 3},
+ {{"vsprintf", 3}, 2},
+ {{"vsscanf", 3}, 2},
+ {{"vfwprintf", 3}, 2},
+ {{"vfwscanf", 3}, 2},
+ {{"vwprintf", 2}, 1},
+ {{"vwscanf", 2}, 1},
+ {{"vswprintf", 4}, 3},
+        // vswprintf is the wide version of vsnprintf;
+        // vsprintf has no wide version.
+ {{"vswscanf", 3}, 2}};
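+// Each entry above pairs a CallDescription (function name and expected
+// argument count) with the index of the function's va_list parameter;
+// e.g. {{"vfprintf", 3}, 2} matches a three-argument vfprintf() call and
+// treats its third argument as the va_list.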
+const CallDescription ValistChecker::VaStart("__builtin_va_start", 2),
+ ValistChecker::VaCopy("__builtin_va_copy", 2),
+ ValistChecker::VaEnd("__builtin_va_end", 1);
+} // end anonymous namespace
+
+void ValistChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ if (!Call.isGlobalCFunction())
+ return;
+ if (Call.isCalled(VaStart))
+ checkVAListStartCall(Call, C, false);
+ else if (Call.isCalled(VaCopy))
+ checkVAListStartCall(Call, C, true);
+ else if (Call.isCalled(VaEnd))
+ checkVAListEndCall(Call, C);
+ else {
+ for (auto FuncInfo : VAListAccepters) {
+ if (!Call.isCalled(FuncInfo.Func))
+ continue;
+ const MemRegion *VAList =
+ getVAListAsRegion(Call.getArgSVal(FuncInfo.VAListPos), C);
+ if (!VAList)
+ return;
+
+ if (C.getState()->contains<InitializedVALists>(VAList))
+ return;
+
+ SmallString<80> Errmsg("Function '");
+ Errmsg += FuncInfo.Func.getFunctionName();
+ Errmsg += "' is called with an uninitialized va_list argument";
+ reportUninitializedAccess(VAList, Errmsg.c_str(), C);
+ break;
+ }
+ }
+}
+
+void ValistChecker::checkPreStmt(const VAArgExpr *VAA,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal VAListSVal = State->getSVal(VAA->getSubExpr(), C.getLocationContext());
+ const MemRegion *VAList = getVAListAsRegion(VAListSVal, C);
+ if (!VAList)
+ return;
+ if (!State->contains<InitializedVALists>(VAList))
+ reportUninitializedAccess(
+ VAList, "va_arg() is called on an uninitialized va_list", C);
+}
+
+void ValistChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ InitializedVAListsTy TrackedVALists = State->get<InitializedVALists>();
+ RegionVector LeakedVALists;
+ for (auto Reg : TrackedVALists) {
+ if (SR.isLiveRegion(Reg))
+ continue;
+ LeakedVALists.push_back(Reg);
+ State = State->remove<InitializedVALists>(Reg);
+ }
+ if (ExplodedNode *N = C.addTransition(State))
+ reportLeakedVALists(LeakedVALists, "Initialized va_list", " is leaked", C,
+ N);
+}
+
+const MemRegion *ValistChecker::getVAListAsRegion(SVal SV,
+ CheckerContext &C) const {
+ const MemRegion *Reg = SV.getAsRegion();
+ const auto *TReg = dyn_cast_or_null<TypedValueRegion>(Reg);
+ // Some VarRegion based VLAs reach here as ElementRegions.
+ const auto *EReg = dyn_cast_or_null<ElementRegion>(TReg);
+ return EReg ? EReg->getSuperRegion() : TReg;
+}
+
+// This function traverses the exploded graph backwards and finds the node where
+// the va_list is initialized. That node is used for uniquing the bug paths.
+// It is not likely that there are several different va_lists that belong to
+// different stack frames, so that case is not yet handled.
+const ExplodedNode *ValistChecker::getStartCallSite(const ExplodedNode *N,
+ const MemRegion *Reg,
+ CheckerContext &C) const {
+ const LocationContext *LeakContext = N->getLocationContext();
+ const ExplodedNode *StartCallNode = N;
+
+ bool FoundInitializedState = false;
+
+ while (N) {
+ ProgramStateRef State = N->getState();
+ if (!State->contains<InitializedVALists>(Reg)) {
+ if (FoundInitializedState)
+ break;
+ } else {
+ FoundInitializedState = true;
+ }
+ const LocationContext *NContext = N->getLocationContext();
+ if (NContext == LeakContext || NContext->isParentOf(LeakContext))
+ StartCallNode = N;
+ N = N->pred_empty() ? nullptr : *(N->pred_begin());
+ }
+
+ return StartCallNode;
+}
+
+void ValistChecker::reportUninitializedAccess(const MemRegion *VAList,
+ StringRef Msg,
+ CheckerContext &C) const {
+ if (!ChecksEnabled[CK_Uninitialized])
+ return;
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ if (!BT_uninitaccess)
+ BT_uninitaccess.reset(new BugType(CheckNames[CK_Uninitialized],
+ "Uninitialized va_list",
+ "Memory Error"));
+ auto R = llvm::make_unique<BugReport>(*BT_uninitaccess, Msg, N);
+ R->markInteresting(VAList);
+ R->addVisitor(llvm::make_unique<ValistBugVisitor>(VAList));
+ C.emitReport(std::move(R));
+ }
+}
+
+void ValistChecker::reportLeakedVALists(const RegionVector &LeakedVALists,
+ StringRef Msg1, StringRef Msg2,
+ CheckerContext &C, ExplodedNode *N,
+ bool ForceReport) const {
+ if (!(ChecksEnabled[CK_Unterminated] ||
+ (ChecksEnabled[CK_Uninitialized] && ForceReport)))
+ return;
+ for (auto Reg : LeakedVALists) {
+ if (!BT_leakedvalist) {
+ BT_leakedvalist.reset(new BugType(CheckNames[CK_Unterminated],
+ "Leaked va_list", "Memory Error"));
+ BT_leakedvalist->setSuppressOnSink(true);
+ }
+
+ const ExplodedNode *StartNode = getStartCallSite(N, Reg, C);
+ PathDiagnosticLocation LocUsedForUniqueing;
+
+ if (const Stmt *StartCallStmt = PathDiagnosticLocation::getStmt(StartNode))
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
+ StartCallStmt, C.getSourceManager(), StartNode->getLocationContext());
+
+ SmallString<100> Buf;
+ llvm::raw_svector_ostream OS(Buf);
+ OS << Msg1;
+ std::string VariableName = Reg->getDescriptiveName();
+ if (!VariableName.empty())
+ OS << " " << VariableName;
+ OS << Msg2;
+
+ auto R = llvm::make_unique<BugReport>(
+ *BT_leakedvalist, OS.str(), N, LocUsedForUniqueing,
+ StartNode->getLocationContext()->getDecl());
+ R->markInteresting(Reg);
+ R->addVisitor(llvm::make_unique<ValistBugVisitor>(Reg, true));
+ C.emitReport(std::move(R));
+ }
+}
+
+void ValistChecker::checkVAListStartCall(const CallEvent &Call,
+ CheckerContext &C, bool IsCopy) const {
+ const MemRegion *VAList = getVAListAsRegion(Call.getArgSVal(0), C);
+ ProgramStateRef State = C.getState();
+ if (!VAList)
+ return;
+
+ if (IsCopy) {
+ const MemRegion *Arg2 = getVAListAsRegion(Call.getArgSVal(1), C);
+ if (Arg2) {
+ if (ChecksEnabled[CK_CopyToSelf] && VAList == Arg2) {
+ RegionVector LeakedVALists{VAList};
+ if (ExplodedNode *N = C.addTransition(State))
+ reportLeakedVALists(LeakedVALists, "va_list",
+ " is copied onto itself", C, N, true);
+ return;
+ } else if (!State->contains<InitializedVALists>(Arg2)) {
+ if (State->contains<InitializedVALists>(VAList)) {
+ State = State->remove<InitializedVALists>(VAList);
+ RegionVector LeakedVALists{VAList};
+ if (ExplodedNode *N = C.addTransition(State))
+ reportLeakedVALists(LeakedVALists, "Initialized va_list",
+ " is overwritten by an uninitialized one", C, N,
+ true);
+ } else {
+ reportUninitializedAccess(Arg2, "Uninitialized va_list is copied", C);
+ }
+ return;
+ }
+ }
+ }
+ if (State->contains<InitializedVALists>(VAList)) {
+ RegionVector LeakedVALists{VAList};
+ if (ExplodedNode *N = C.addTransition(State))
+ reportLeakedVALists(LeakedVALists, "Initialized va_list",
+ " is initialized again", C, N);
+ return;
+ }
+
+ State = State->add<InitializedVALists>(VAList);
+ C.addTransition(State);
+}
+
+void ValistChecker::checkVAListEndCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const MemRegion *VAList = getVAListAsRegion(Call.getArgSVal(0), C);
+ if (!VAList)
+ return;
+
+ if (!C.getState()->contains<InitializedVALists>(VAList)) {
+ reportUninitializedAccess(
+ VAList, "va_end() is called on an uninitialized va_list", C);
+ return;
+ }
+ ProgramStateRef State = C.getState();
+ State = State->remove<InitializedVALists>(VAList);
+ C.addTransition(State);
+}
+
+PathDiagnosticPiece *ValistChecker::ValistBugVisitor::VisitNode(
+ const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef State = N->getState();
+ ProgramStateRef StatePrev = PrevN->getState();
+
+ const Stmt *S = PathDiagnosticLocation::getStmt(N);
+ if (!S)
+ return nullptr;
+
+ StringRef Msg;
+ if (State->contains<InitializedVALists>(Reg) &&
+ !StatePrev->contains<InitializedVALists>(Reg))
+ Msg = "Initialized va_list";
+ else if (!State->contains<InitializedVALists>(Reg) &&
+ StatePrev->contains<InitializedVALists>(Reg))
+ Msg = "Ended va_list";
+
+ if (Msg.empty())
+ return nullptr;
+
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, Msg, true);
+}
+
+#define REGISTER_CHECKER(name) \
+ void ento::register##name##Checker(CheckerManager &mgr) { \
+ ValistChecker *checker = mgr.registerChecker<ValistChecker>(); \
+ checker->ChecksEnabled[ValistChecker::CK_##name] = true; \
+ checker->CheckNames[ValistChecker::CK_##name] = mgr.getCurrentCheckName(); \
+ }
+
+REGISTER_CHECKER(Uninitialized)
+REGISTER_CHECKER(Unterminated)
+REGISTER_CHECKER(CopyToSelf)
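
For context, the patterns the three va_list checks target look roughly like this (illustrative code, not part of the patch):

    #include <stdarg.h>

    void sum(int count, ...) {
      va_list ap;
      va_start(ap, count);      // 'ap' is added to InitializedVALists
      if (count == 0)
        return;                 // Unterminated: initialized 'ap' is leaked
      int first = va_arg(ap, int);
      (void)first;
      va_end(ap);               // 'ap' is removed from InitializedVALists
    }

    void copy_self(va_list ap) {
      va_copy(ap, ap);          // CopyToSelf: va_list is copied onto itself
      va_end(ap);
    }
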
diff --git a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
index 550250302611..15e8ea31c4c4 100644
--- a/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
+++ b/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -32,6 +32,18 @@ class WalkAST : public StmtVisitor<WalkAST> {
BugReporter &BR;
AnalysisDeclContext *AC;
+ /// The root constructor or destructor whose callees are being analyzed.
+ const CXXMethodDecl *RootMethod = nullptr;
+
+ /// Whether the checker should walk into bodies of called functions.
+ /// Controlled by the "Interprocedural" analyzer-config option.
+ bool IsInterprocedural = false;
+
+ /// Whether the checker should only warn for calls to pure virtual functions
+ /// (which is undefined behavior) or for all virtual functions (which may
+ /// result in unexpected behavior).
+ bool ReportPureOnly = false;
+
typedef const CallExpr * WorkListUnit;
typedef SmallVector<WorkListUnit, 20> DFSWorkList;
@@ -59,9 +71,16 @@ class WalkAST : public StmtVisitor<WalkAST> {
const CallExpr *visitingCallExpr;
public:
- WalkAST(const CheckerBase *checker, BugReporter &br,
- AnalysisDeclContext *ac)
- : Checker(checker), BR(br), AC(ac), visitingCallExpr(nullptr) {}
+ WalkAST(const CheckerBase *checker, BugReporter &br, AnalysisDeclContext *ac,
+ const CXXMethodDecl *rootMethod, bool isInterprocedural,
+ bool reportPureOnly)
+ : Checker(checker), BR(br), AC(ac), RootMethod(rootMethod),
+ IsInterprocedural(isInterprocedural), ReportPureOnly(reportPureOnly),
+ visitingCallExpr(nullptr) {
+ // Walking should always start from either a constructor or a destructor.
+ assert(isa<CXXConstructorDecl>(rootMethod) ||
+ isa<CXXDestructorDecl>(rootMethod));
+ }
bool hasWork() const { return !WList.empty(); }
@@ -132,7 +151,8 @@ void WalkAST::VisitChildren(Stmt *S) {
void WalkAST::VisitCallExpr(CallExpr *CE) {
VisitChildren(CE);
- Enqueue(CE);
+ if (IsInterprocedural)
+ Enqueue(CE);
}
void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
@@ -164,51 +184,64 @@ void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
!MD->getParent()->hasAttr<FinalAttr>())
ReportVirtualCall(CE, MD->isPure());
- Enqueue(CE);
+ if (IsInterprocedural)
+ Enqueue(CE);
}
void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
+ if (ReportPureOnly && !isPure)
+ return;
+
SmallString<100> buf;
llvm::raw_svector_ostream os(buf);
- os << "Call Path : ";
- // Name of current visiting CallExpr.
- os << *CE->getDirectCallee();
-
- // Name of the CallExpr whose body is current walking.
- if (visitingCallExpr)
- os << " <-- " << *visitingCallExpr->getDirectCallee();
- // Names of FunctionDecls in worklist with state PostVisited.
- for (SmallVectorImpl<const CallExpr *>::iterator I = WList.end(),
+ // FIXME: The interprocedural diagnostic experience here is not good.
+ // Ultimately this checker should be re-written to be path sensitive.
+ // For now, only diagnose intraprocedurally, by default.
+ if (IsInterprocedural) {
+ os << "Call Path : ";
+ // Name of current visiting CallExpr.
+ os << *CE->getDirectCallee();
+
+ // Name of the CallExpr whose body is currently being walked.
+ if (visitingCallExpr)
+ os << " <-- " << *visitingCallExpr->getDirectCallee();
+ // Names of FunctionDecls in worklist with state PostVisited.
+ for (SmallVectorImpl<const CallExpr *>::iterator I = WList.end(),
E = WList.begin(); I != E; --I) {
- const FunctionDecl *FD = (*(I-1))->getDirectCallee();
- assert(FD);
- if (VisitedFunctions[FD] == PostVisited)
- os << " <-- " << *FD;
+ const FunctionDecl *FD = (*(I-1))->getDirectCallee();
+ assert(FD);
+ if (VisitedFunctions[FD] == PostVisited)
+ os << " <-- " << *FD;
+ }
+
+ os << "\n";
}
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
SourceRange R = CE->getCallee()->getSourceRange();
- if (isPure) {
- os << "\n" << "Call pure virtual functions during construction or "
- << "destruction may leads undefined behaviour";
- BR.EmitBasicReport(AC->getDecl(), Checker,
- "Call pure virtual function during construction or "
- "Destruction",
- "Cplusplus", os.str(), CELoc, R);
- return;
- }
- else {
- os << "\n" << "Call virtual functions during construction or "
- << "destruction will never go to a more derived class";
- BR.EmitBasicReport(AC->getDecl(), Checker,
- "Call virtual function during construction or "
- "Destruction",
- "Cplusplus", os.str(), CELoc, R);
- return;
- }
+ os << "Call to ";
+ if (isPure)
+ os << "pure ";
+
+ os << "virtual function during ";
+
+ if (isa<CXXConstructorDecl>(RootMethod))
+ os << "construction ";
+ else
+ os << "destruction ";
+
+ if (isPure)
+ os << "has undefined behavior";
+ else
+ os << "will not dispatch to derived class";
+
+ BR.EmitBasicReport(AC->getDecl(), Checker,
+ "Call to virtual function during construction or "
+ "destruction",
+ "C++ Object Lifecycle", os.str(), CELoc, R);
}
//===----------------------------------------------------------------------===//
@@ -218,14 +251,18 @@ void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
namespace {
class VirtualCallChecker : public Checker<check::ASTDecl<CXXRecordDecl> > {
public:
+ DefaultBool isInterprocedural;
+ DefaultBool isPureOnly;
+
void checkASTDecl(const CXXRecordDecl *RD, AnalysisManager& mgr,
BugReporter &BR) const {
- WalkAST walker(this, BR, mgr.getAnalysisDeclContext(RD));
+ AnalysisDeclContext *ADC = mgr.getAnalysisDeclContext(RD);
// Check the constructors.
for (const auto *I : RD->ctors()) {
if (!I->isCopyOrMoveConstructor())
if (Stmt *Body = I->getBody()) {
+ WalkAST walker(this, BR, ADC, I, isInterprocedural, isPureOnly);
walker.Visit(Body);
walker.Execute();
}
@@ -234,6 +271,7 @@ public:
// Check the destructor.
if (CXXDestructorDecl *DD = RD->getDestructor())
if (Stmt *Body = DD->getBody()) {
+ WalkAST walker(this, BR, ADC, DD, isInterprocedural, isPureOnly);
walker.Visit(Body);
walker.Execute();
}
@@ -242,5 +280,12 @@ public:
}
void ento::registerVirtualCallChecker(CheckerManager &mgr) {
- mgr.registerChecker<VirtualCallChecker>();
+ VirtualCallChecker *checker = mgr.registerChecker<VirtualCallChecker>();
+ checker->isInterprocedural =
+ mgr.getAnalyzerOptions().getBooleanOption("Interprocedural", false,
+ checker);
+
+ checker->isPureOnly =
+ mgr.getAnalyzerOptions().getBooleanOption("PureOnly", false,
+ checker);
}
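
For context, the reworked messages describe the classic construction/destruction dispatch pitfall; a minimal illustrative example (types not from the patch):

    struct Base {
      Base() { init(); }          // dispatches to Base::init, never to
                                  // Derived::init: "will not dispatch to
                                  // derived class"
      virtual ~Base() = default;
      virtual void init() {}
    };

    struct Derived : Base {
      void init() override {}     // never reached from Base::Base()
    };

With "PureOnly" set (read via getBooleanOption above), only calls to pure virtual functions, whose behavior is undefined, are reported; "Interprocedural" restores the old walk into callees.
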
diff --git a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 54c668cd2d6f..15422633ba33 100644
--- a/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -23,6 +23,25 @@ using namespace clang;
using namespace ento;
using namespace llvm;
+std::vector<StringRef>
+AnalyzerOptions::getRegisteredCheckers(bool IncludeExperimental /* = false */) {
+ static const StringRef StaticAnalyzerChecks[] = {
+#define GET_CHECKERS
+#define CHECKER(FULLNAME, CLASS, DESCFILE, HELPTEXT, GROUPINDEX, HIDDEN) \
+ FULLNAME,
+#include "clang/StaticAnalyzer/Checkers/Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+ };
+ std::vector<StringRef> Result;
+ for (StringRef CheckName : StaticAnalyzerChecks) {
+ if (!CheckName.startswith("debug.") &&
+ (IncludeExperimental || !CheckName.startswith("alpha.")))
+ Result.push_back(CheckName);
+ }
+ return Result;
+}
+
AnalyzerOptions::UserModeKind AnalyzerOptions::getUserMode() {
if (UserMode == UMK_NotSet) {
StringRef ModeStr =
@@ -344,3 +363,10 @@ bool AnalyzerOptions::shouldWidenLoops() {
WidenLoops = getBooleanOption("widen-loops", /*Default=*/false);
return WidenLoops.getValue();
}
+
+bool AnalyzerOptions::shouldDisplayNotesAsEvents() {
+ if (!DisplayNotesAsEvents.hasValue())
+ DisplayNotesAsEvents =
+ getBooleanOption("notes-as-events", /*Default=*/false);
+ return DisplayNotesAsEvents.getValue();
+}
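
Restated as a standalone sketch (names illustrative), the filter above drops debug checkers unconditionally and keeps alpha checkers only on request:

    #include <string>
    #include <vector>

    std::vector<std::string> filterCheckers(const std::vector<std::string> &All,
                                            bool IncludeExperimental) {
      std::vector<std::string> Kept;
      for (const std::string &Name : All) {
        const bool IsDebug = Name.compare(0, 6, "debug.") == 0;
        const bool IsAlpha = Name.compare(0, 6, "alpha.") == 0;
        if (!IsDebug && (IncludeExperimental || !IsAlpha))
          Kept.push_back(Name);
      }
      return Kept;
    }
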
diff --git a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index 3c3f41a885e9..ebbace4e33b3 100644
--- a/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -33,6 +33,13 @@ void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID,
ID.AddPointer(region);
}
+void PointerToMemberData::Profile(
+ llvm::FoldingSetNodeID& ID, const DeclaratorDecl *D,
+ llvm::ImmutableList<const CXXBaseSpecifier *> L) {
+ ID.AddPointer(D);
+ ID.AddPointer(L.getInternalPointer());
+}
+
typedef std::pair<SVal, uintptr_t> SValData;
typedef std::pair<SVal, SVal> SValPair;
@@ -142,6 +149,49 @@ BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
return D;
}
+const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
+ const DeclaratorDecl *DD, llvm::ImmutableList<const CXXBaseSpecifier*> L) {
+ llvm::FoldingSetNodeID ID;
+ PointerToMemberData::Profile(ID, DD, L);
+ void *InsertPos;
+
+ PointerToMemberData *D =
+ PointerToMemberDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (PointerToMemberData*) BPAlloc.Allocate<PointerToMemberData>();
+ new (D) PointerToMemberData(DD, L);
+ PointerToMemberDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const clang::ento::PointerToMemberData *BasicValueFactory::accumCXXBase(
+ llvm::iterator_range<CastExpr::path_const_iterator> PathRange,
+ const nonloc::PointerToMember &PTM) {
+ nonloc::PointerToMember::PTMDataType PTMDT = PTM.getPTMData();
+ const DeclaratorDecl *DD = nullptr;
+ llvm::ImmutableList<const CXXBaseSpecifier *> PathList;
+
+ if (PTMDT.isNull() || PTMDT.is<const DeclaratorDecl *>()) {
+ if (PTMDT.is<const DeclaratorDecl *>())
+ DD = PTMDT.get<const DeclaratorDecl *>();
+
+ PathList = CXXBaseListFactory.getEmptyList();
+ } else { // const PointerToMemberData *
+ const PointerToMemberData *PTMD =
+ PTMDT.get<const PointerToMemberData *>();
+ DD = PTMD->getDeclaratorDecl();
+
+ PathList = PTMD->getCXXBaseList();
+ }
+
+ for (const auto &I : llvm::reverse(PathRange))
+ PathList = prependCXXBase(I, PathList);
+ return getPointerToMemberData(DD, PathList);
+}
+
const llvm::APSInt*
BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1, const llvm::APSInt& V2) {
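
For context, the member-pointer casts that accumCXXBase models look roughly like this (illustrative types):

    struct B { int f; };
    struct D : B {};

    void demo() {
      int B::*pb = &B::f;
      // CK_BaseToDerivedMemberPointer: the cast carries a CXXBaseSpecifier
      // path (here just {B}); accumCXXBase prepends it, in reverse, onto
      // the list already stored in the PointerToMember value.
      int D::*pd = pb;
      D d;
      d.*pd = 42;
    }
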
diff --git a/lib/StaticAnalyzer/Core/BugReporter.cpp b/lib/StaticAnalyzer/Core/BugReporter.cpp
index 488126b0088a..53b4e699f7ad 100644
--- a/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/ProgramPoint.h"
#include "clang/Basic/SourceManager.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
@@ -78,7 +79,9 @@ static PathDiagnosticEventPiece *
eventsDescribeSameCondition(PathDiagnosticEventPiece *X,
PathDiagnosticEventPiece *Y) {
// Prefer diagnostics that come from ConditionBRVisitor over
- // those that came from TrackConstraintBRVisitor.
+ // those that came from TrackConstraintBRVisitor,
+ // unless the one from ConditionBRVisitor is
+ // its generic fallback diagnostic.
const void *tagPreferred = ConditionBRVisitor::getTag();
const void *tagLesser = TrackConstraintBRVisitor::getTag();
@@ -86,10 +89,10 @@ eventsDescribeSameCondition(PathDiagnosticEventPiece *X,
return nullptr;
if (X->getTag() == tagPreferred && Y->getTag() == tagLesser)
- return X;
+ return ConditionBRVisitor::isPieceMessageGeneric(X) ? Y : X;
if (Y->getTag() == tagPreferred && X->getTag() == tagLesser)
- return Y;
+ return ConditionBRVisitor::isPieceMessageGeneric(Y) ? X : Y;
return nullptr;
}
@@ -112,15 +115,15 @@ static void removeRedundantMsgs(PathPieces &path) {
path.pop_front();
switch (piece->getKind()) {
- case clang::ento::PathDiagnosticPiece::Call:
+ case PathDiagnosticPiece::Call:
removeRedundantMsgs(cast<PathDiagnosticCallPiece>(piece)->path);
break;
- case clang::ento::PathDiagnosticPiece::Macro:
+ case PathDiagnosticPiece::Macro:
removeRedundantMsgs(cast<PathDiagnosticMacroPiece>(piece)->subPieces);
break;
- case clang::ento::PathDiagnosticPiece::ControlFlow:
+ case PathDiagnosticPiece::ControlFlow:
break;
- case clang::ento::PathDiagnosticPiece::Event: {
+ case PathDiagnosticPiece::Event: {
if (i == N-1)
break;
@@ -140,6 +143,8 @@ static void removeRedundantMsgs(PathPieces &path) {
}
break;
}
+ case PathDiagnosticPiece::Note:
+ break;
}
path.push_back(piece);
}
@@ -197,6 +202,9 @@ static bool removeUnneededCalls(PathPieces &pieces, BugReport *R,
}
case PathDiagnosticPiece::ControlFlow:
break;
+
+ case PathDiagnosticPiece::Note:
+ break;
}
pieces.push_back(piece);
@@ -3104,6 +3112,7 @@ bool GRBugReporter::generatePathDiagnostic(PathDiagnostic& PD,
R->addVisitor(llvm::make_unique<NilReceiverBRVisitor>());
R->addVisitor(llvm::make_unique<ConditionBRVisitor>());
R->addVisitor(llvm::make_unique<LikelyFalsePositiveSuppressionBRVisitor>());
+ R->addVisitor(llvm::make_unique<CXXSelfAssignmentBRVisitor>());
BugReport::VisitorList visitors;
unsigned origReportConfigToken, finalReportConfigToken;
@@ -3277,6 +3286,19 @@ struct FRIEC_WLItem {
};
}
+static const CFGBlock *findBlockForNode(const ExplodedNode *N) {
+ ProgramPoint P = N->getLocation();
+ if (auto BEP = P.getAs<BlockEntrance>())
+ return BEP->getBlock();
+
+ // Find the node's current statement in the CFG.
+ if (const Stmt *S = PathDiagnosticLocation::getStmt(N))
+ return N->getLocationContext()->getAnalysisDeclContext()
+ ->getCFGStmtMap()->getBlock(S);
+
+ return nullptr;
+}
+
static BugReport *
FindReportInEquivalenceClass(BugReportEquivClass& EQ,
SmallVectorImpl<BugReport*> &bugReports) {
@@ -3325,6 +3347,18 @@ FindReportInEquivalenceClass(BugReportEquivClass& EQ,
continue;
}
+ // See if we are in a no-return CFG block. If so, treat this similarly
+ // to being post-dominated by a sink. This works better when the analysis
+ // is incomplete and we have not yet reached the no-return function
+ // we are post-dominated by.
+ // This is not quite enough to handle the incomplete analysis case.
+ // We may be post-dominated in subsequent blocks, or even
+ // inter-procedurally. However, it is not clear if more complicated
+ // cases are generally worth suppressing.
+ if (const CFGBlock *B = findBlockForNode(errorNode))
+ if (B->hasNoReturnElement())
+ continue;
+
// At this point we know that 'N' is not a sink and it has at least one
// successor. Use a DFS worklist to find a non-sink end-of-path node.
typedef FRIEC_WLItem WLItem;
@@ -3402,25 +3436,28 @@ void BugReporter::FlushReport(BugReport *exampleReport,
exampleReport->getUniqueingLocation(),
exampleReport->getUniqueingDecl()));
- MaxBugClassSize = std::max(bugReports.size(),
- static_cast<size_t>(MaxBugClassSize));
+ if (exampleReport->isPathSensitive()) {
+ // Generate the full path diagnostic, using the generation scheme
+ // specified by the PathDiagnosticConsumer. Note that we have to generate
+ // path diagnostics even for consumers which do not support paths, because
+ // the BugReporterVisitors may mark this bug as a false positive.
+ assert(!bugReports.empty());
+
+ MaxBugClassSize =
+ std::max(bugReports.size(), static_cast<size_t>(MaxBugClassSize));
- // Generate the full path diagnostic, using the generation scheme
- // specified by the PathDiagnosticConsumer. Note that we have to generate
- // path diagnostics even for consumers which do not support paths, because
- // the BugReporterVisitors may mark this bug as a false positive.
- if (!bugReports.empty())
if (!generatePathDiagnostic(*D.get(), PD, bugReports))
return;
- MaxValidBugClassSize = std::max(bugReports.size(),
- static_cast<size_t>(MaxValidBugClassSize));
+ MaxValidBugClassSize =
+ std::max(bugReports.size(), static_cast<size_t>(MaxValidBugClassSize));
- // Examine the report and see if the last piece is in a header. Reset the
- // report location to the last piece in the main source file.
- AnalyzerOptions& Opts = getAnalyzerOptions();
- if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll)
- D->resetDiagnosticLocationToMainFile();
+ // Examine the report and see if the last piece is in a header. Reset the
+ // report location to the last piece in the main source file.
+ AnalyzerOptions &Opts = getAnalyzerOptions();
+ if (Opts.shouldReportIssuesInMainSourceFile() && !Opts.AnalyzeAll)
+ D->resetDiagnosticLocationToMainFile();
+ }
// If the path is empty, generate a single step path with the location
// of the issue.
@@ -3433,6 +3470,27 @@ void BugReporter::FlushReport(BugReport *exampleReport,
D->setEndOfPath(std::move(piece));
}
+ PathPieces &Pieces = D->getMutablePieces();
+ if (getAnalyzerOptions().shouldDisplayNotesAsEvents()) {
+ // For path diagnostic consumers that don't support extra notes,
+ // we may optionally convert those notes into path events.
+ for (auto I = exampleReport->getNotes().rbegin(),
+ E = exampleReport->getNotes().rend(); I != E; ++I) {
+ PathDiagnosticNotePiece *Piece = I->get();
+ PathDiagnosticEventPiece *ConvertedPiece =
+ new PathDiagnosticEventPiece(Piece->getLocation(),
+ Piece->getString());
+ for (const auto &R: Piece->getRanges())
+ ConvertedPiece->addRange(R);
+
+ Pieces.push_front(ConvertedPiece);
+ }
+ } else {
+ for (auto I = exampleReport->getNotes().rbegin(),
+ E = exampleReport->getNotes().rend(); I != E; ++I)
+ Pieces.push_front(*I);
+ }
+
// Get the meta data.
const BugReport::ExtraTextList &Meta = exampleReport->getExtraText();
for (BugReport::ExtraTextList::const_iterator i = Meta.begin(),
@@ -3517,6 +3575,13 @@ LLVM_DUMP_METHOD void PathDiagnosticMacroPiece::dump() const {
// FIXME: Print which macro is being invoked.
}
+LLVM_DUMP_METHOD void PathDiagnosticNotePiece::dump() const {
+ llvm::errs() << "NOTE\n--------------\n";
+ llvm::errs() << getString() << "\n";
+ llvm::errs() << " ---- at ----\n";
+ getLocation().dump();
+}
+
LLVM_DUMP_METHOD void PathDiagnosticLocation::dump() const {
if (!isValid()) {
llvm::errs() << "<INVALID>\n";
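
Since "notes-as-events" is an ordinary boolean analyzer-config key, it can presumably be toggled with the usual option plumbing, along the lines of (exact driver spelling not shown in this patch):

    clang --analyze -Xclang -analyzer-config -Xclang notes-as-events=true test.cpp
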
diff --git a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index 0e505463bb5e..7f20f0d7703e 100644
--- a/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Lex/Lexer.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -916,7 +917,7 @@ static const Expr *peelOffOuterExpr(const Expr *Ex,
if (PropRef && PropRef->isMessagingGetter()) {
const Expr *GetterMessageSend =
POE->getSemanticExpr(POE->getNumSemanticExprs() - 1);
- assert(isa<ObjCMessageExpr>(GetterMessageSend));
+ assert(isa<ObjCMessageExpr>(GetterMessageSend->IgnoreParenCasts()));
return peelOffOuterExpr(GetterMessageSend, N);
}
}
@@ -1271,7 +1272,22 @@ ConditionBRVisitor::VisitTerminator(const Stmt *Term,
BugReporterContext &BRC) {
const Expr *Cond = nullptr;
+ // In the code below, Term is a CFG terminator and Cond is the branch
+ // condition expression upon which the decision at this terminator is made.
+ //
+ // For example, in "if (x == 0)", the "if (x == 0)" statement is a terminator,
+ // and "x == 0" is the respective condition.
+ //
+ // Another example: in "if (x && y)", we've got two terminators and two
+ // conditions due to the short-circuit nature of operator "&&":
+ // 1. The "if (x && y)" statement is a terminator,
+ // and "y" is the respective condition.
+ // 2. Also "x && ..." is another terminator,
+ // and "x" is its condition.
+
switch (Term->getStmtClass()) {
+ // FIXME: Stmt::SwitchStmtClass is worth handling, however it is a bit
+ // more tricky because there are more than two branches to account for.
default:
return nullptr;
case Stmt::IfStmtClass:
@@ -1280,6 +1296,24 @@ ConditionBRVisitor::VisitTerminator(const Stmt *Term,
case Stmt::ConditionalOperatorClass:
Cond = cast<ConditionalOperator>(Term)->getCond();
break;
+ case Stmt::BinaryOperatorClass:
+ // When we encounter a logical operator (&& or ||) as a CFG terminator,
+ // then the condition is actually its LHS; otherwise, we'd encounter
+ // the parent, such as an if-statement, as the terminator.
+ const auto *BO = cast<BinaryOperator>(Term);
+ assert(BO->isLogicalOp() &&
+ "CFG terminator is not a short-circuit operator!");
+ Cond = BO->getLHS();
+ break;
+ }
+
+ // However, when we encounter a logical operator as a branch condition,
+ // then the condition is actually its RHS, because the LHS would be
+ // the condition for the logical operator terminator.
+ while (const auto *InnerBO = dyn_cast<BinaryOperator>(Cond)) {
+ if (!InnerBO->isLogicalOp())
+ break;
+ Cond = InnerBO->getRHS()->IgnoreParens();
}
assert(Cond);
@@ -1294,34 +1328,54 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
BugReporterContext &BRC,
BugReport &R,
const ExplodedNode *N) {
-
- const Expr *Ex = Cond;
+ // These will be modified in the code below, but we need to preserve the
+ // original values in case we want to emit the generic message.
+ const Expr *CondTmp = Cond;
+ bool tookTrueTmp = tookTrue;
while (true) {
- Ex = Ex->IgnoreParenCasts();
- switch (Ex->getStmtClass()) {
+ CondTmp = CondTmp->IgnoreParenCasts();
+ switch (CondTmp->getStmtClass()) {
default:
- return nullptr;
+ break;
case Stmt::BinaryOperatorClass:
- return VisitTrueTest(Cond, cast<BinaryOperator>(Ex), tookTrue, BRC,
- R, N);
+ if (PathDiagnosticPiece *P = VisitTrueTest(
+ Cond, cast<BinaryOperator>(CondTmp), tookTrueTmp, BRC, R, N))
+ return P;
+ break;
case Stmt::DeclRefExprClass:
- return VisitTrueTest(Cond, cast<DeclRefExpr>(Ex), tookTrue, BRC,
- R, N);
+ if (PathDiagnosticPiece *P = VisitTrueTest(
+ Cond, cast<DeclRefExpr>(CondTmp), tookTrueTmp, BRC, R, N))
+ return P;
+ break;
case Stmt::UnaryOperatorClass: {
- const UnaryOperator *UO = cast<UnaryOperator>(Ex);
+ const UnaryOperator *UO = cast<UnaryOperator>(CondTmp);
if (UO->getOpcode() == UO_LNot) {
- tookTrue = !tookTrue;
- Ex = UO->getSubExpr();
+ tookTrueTmp = !tookTrueTmp;
+ CondTmp = UO->getSubExpr();
continue;
}
- return nullptr;
+ break;
}
}
+ break;
}
+
+ // Condition too complex to explain? Just say something so that the user
+ // knows we've made some path decision at this point.
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ if (!Loc.isValid() || !Loc.asLocation().isValid())
+ return nullptr;
+
+ PathDiagnosticEventPiece *Event = new PathDiagnosticEventPiece(
+ Loc, tookTrue ? GenericTrueMessage : GenericFalseMessage);
+ return Event;
}
-bool ConditionBRVisitor::patternMatch(const Expr *Ex, raw_ostream &Out,
+bool ConditionBRVisitor::patternMatch(const Expr *Ex,
+ const Expr *ParentEx,
+ raw_ostream &Out,
BugReporterContext &BRC,
BugReport &report,
const ExplodedNode *N,
@@ -1329,6 +1383,47 @@ bool ConditionBRVisitor::patternMatch(const Expr *Ex, raw_ostream &Out,
const Expr *OriginalExpr = Ex;
Ex = Ex->IgnoreParenCasts();
+ // Use heuristics to determine if Ex is a macro expanding to a literal and
+ // if so, use the macro's name.
+ SourceLocation LocStart = Ex->getLocStart();
+ SourceLocation LocEnd = Ex->getLocEnd();
+ if (LocStart.isMacroID() && LocEnd.isMacroID() &&
+ (isa<GNUNullExpr>(Ex) ||
+ isa<ObjCBoolLiteralExpr>(Ex) ||
+ isa<CXXBoolLiteralExpr>(Ex) ||
+ isa<IntegerLiteral>(Ex) ||
+ isa<FloatingLiteral>(Ex))) {
+
+ StringRef StartName = Lexer::getImmediateMacroNameForDiagnostics(LocStart,
+ BRC.getSourceManager(), BRC.getASTContext().getLangOpts());
+ StringRef EndName = Lexer::getImmediateMacroNameForDiagnostics(LocEnd,
+ BRC.getSourceManager(), BRC.getASTContext().getLangOpts());
+ bool beginAndEndAreTheSameMacro = StartName.equals(EndName);
+
+ bool partOfParentMacro = false;
+ if (ParentEx->getLocStart().isMacroID()) {
+ StringRef PName = Lexer::getImmediateMacroNameForDiagnostics(
+ ParentEx->getLocStart(), BRC.getSourceManager(),
+ BRC.getASTContext().getLangOpts());
+ partOfParentMacro = PName.equals(StartName);
+ }
+
+ if (beginAndEndAreTheSameMacro && !partOfParentMacro) {
+ // Get the location of the macro name as written by the caller.
+ SourceLocation Loc = LocStart;
+ while (LocStart.isMacroID()) {
+ Loc = LocStart;
+ LocStart = BRC.getSourceManager().getImmediateMacroCallerLoc(LocStart);
+ }
+ StringRef MacroName = Lexer::getImmediateMacroNameForDiagnostics(
+ Loc, BRC.getSourceManager(), BRC.getASTContext().getLangOpts());
+
+ // Return the macro name.
+ Out << MacroName;
+ return false;
+ }
+ }
+
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
const bool quotes = isa<VarDecl>(DR->getDecl());
if (quotes) {
@@ -1389,10 +1484,10 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
SmallString<128> LhsString, RhsString;
{
llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
- const bool isVarLHS = patternMatch(BExpr->getLHS(), OutLHS, BRC, R, N,
- shouldPrune);
- const bool isVarRHS = patternMatch(BExpr->getRHS(), OutRHS, BRC, R, N,
- shouldPrune);
+ const bool isVarLHS = patternMatch(BExpr->getLHS(), BExpr, OutLHS,
+ BRC, R, N, shouldPrune);
+ const bool isVarRHS = patternMatch(BExpr->getRHS(), BExpr, OutRHS,
+ BRC, R, N, shouldPrune);
shouldInvert = !isVarLHS && isVarRHS;
}
@@ -1552,6 +1647,17 @@ ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
return event;
}
+const char *const ConditionBRVisitor::GenericTrueMessage =
+ "Assuming the condition is true";
+const char *const ConditionBRVisitor::GenericFalseMessage =
+ "Assuming the condition is false";
+
+bool ConditionBRVisitor::isPieceMessageGeneric(
+ const PathDiagnosticPiece *Piece) {
+ return Piece->getString() == GenericTrueMessage ||
+ Piece->getString() == GenericFalseMessage;
+}
+
std::unique_ptr<PathDiagnosticPiece>
LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
@@ -1693,3 +1799,56 @@ UndefOrNullArgVisitor::VisitNode(const ExplodedNode *N,
}
return nullptr;
}
+
+PathDiagnosticPiece *
+CXXSelfAssignmentBRVisitor::VisitNode(const ExplodedNode *Succ,
+ const ExplodedNode *Pred,
+ BugReporterContext &BRC, BugReport &BR) {
+ if (Satisfied)
+ return nullptr;
+
+ auto Edge = Succ->getLocation().getAs<BlockEdge>();
+ if (!Edge.hasValue())
+ return nullptr;
+
+ auto Tag = Edge->getTag();
+ if (!Tag)
+ return nullptr;
+
+ if (Tag->getTagDescription() != "cplusplus.SelfAssignment")
+ return nullptr;
+
+ Satisfied = true;
+
+ const auto *Met =
+ dyn_cast<CXXMethodDecl>(Succ->getCodeDecl().getAsFunction());
+ assert(Met && "Not a C++ method.");
+ assert((Met->isCopyAssignmentOperator() || Met->isMoveAssignmentOperator()) &&
+ "Not a copy/move assignment operator.");
+
+ const auto *LCtx = Edge->getLocationContext();
+
+ const auto &State = Succ->getState();
+ auto &SVB = State->getStateManager().getSValBuilder();
+
+ const auto Param =
+ State->getSVal(State->getRegion(Met->getParamDecl(0), LCtx));
+ const auto This =
+ State->getSVal(SVB.getCXXThis(Met, LCtx->getCurrentStackFrame()));
+
+ auto L = PathDiagnosticLocation::create(Met, BRC.getSourceManager());
+
+ if (!L.isValid() || !L.asLocation().isValid())
+ return nullptr;
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+
+ Out << "Assuming " << Met->getParamDecl(0)->getName() <<
+ ((Param == This) ? " == " : " != ") << "*this";
+
+ auto *Piece = new PathDiagnosticEventPiece(L, Out.str());
+ Piece->addRange(Met->getSourceRange());
+
+ return Piece;
+}
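
For context, the visitor annotates the analyzer's branch over self-assignment in copy/move assignment operators; an illustrative operator (types not from the patch):

    class Buffer {
      char *Data;
    public:
      Buffer &operator=(const Buffer &Other) {
        // On one branch the visitor emits "Assuming Other == *this", on
        // the other "Assuming Other != *this", at the method's location.
        if (this == &Other)
          return *this;
        // ... free Data, copy Other.Data ...
        return *this;
      }
    };
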
diff --git a/lib/StaticAnalyzer/Core/CallEvent.cpp b/lib/StaticAnalyzer/Core/CallEvent.cpp
index 52613186677a..420e2a6b5c8c 100644
--- a/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -382,6 +382,11 @@ bool AnyFunctionCall::argumentsMayEscape() const {
if (II->isStr("funopen"))
return true;
+ // - __cxa_demangle - can reallocate memory and can return the pointer to
+ // the input buffer.
+ if (II->isStr("__cxa_demangle"))
+ return true;
+
StringRef FName = II->getName();
// - CoreFoundation functions that end with "NoCopy" can free a passed-in
@@ -552,7 +557,7 @@ void CXXInstanceCall::getInitialStackFrameContents(
// FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
bool Failed;
- ThisVal = StateMgr.getStoreManager().evalDynamicCast(ThisVal, Ty, Failed);
+ ThisVal = StateMgr.getStoreManager().attemptDownCast(ThisVal, Ty, Failed);
assert(!Failed && "Calling an incorrectly devirtualized method");
}
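
The escape is real: __cxa_demangle may realloc() a caller-supplied buffer and return a pointer that aliases it. An illustrative use, in the simplest self-allocating form:

    #include <cxxabi.h>
    #include <cstdlib>

    void demo(const char *Mangled) {
      int Status = 0;
      // With a null buffer, the result is malloc'd by the callee; with a
      // malloc'd buffer, it may be realloc'd -- hence argumentsMayEscape.
      char *Name = abi::__cxa_demangle(Mangled, nullptr, nullptr, &Status);
      if (Status == 0)
        std::free(Name);
    }
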
diff --git a/lib/StaticAnalyzer/Core/CheckerManager.cpp b/lib/StaticAnalyzer/Core/CheckerManager.cpp
index d8382e88691a..79e204cdafec 100644
--- a/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -518,15 +518,6 @@ void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
expandGraphWithCheckers(C, Dst, Src);
}
-/// \brief True if at least one checker wants to check region changes.
-bool CheckerManager::wantsRegionChangeUpdate(ProgramStateRef state) {
- for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i)
- if (RegionChangesCheckers[i].WantUpdateFn(state))
- return true;
-
- return false;
-}
-
/// \brief Run checkers for region changes.
ProgramStateRef
CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
@@ -539,8 +530,8 @@ CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
// bail out.
if (!state)
return nullptr;
- state = RegionChangesCheckers[i].CheckFn(state, invalidated,
- ExplicitRegions, Regions, Call);
+ state = RegionChangesCheckers[i](state, invalidated,
+ ExplicitRegions, Regions, Call);
}
return state;
}
@@ -726,10 +717,8 @@ void CheckerManager::_registerForDeadSymbols(CheckDeadSymbolsFunc checkfn) {
DeadSymbolsCheckers.push_back(checkfn);
}
-void CheckerManager::_registerForRegionChanges(CheckRegionChangesFunc checkfn,
- WantsRegionChangeUpdateFunc wantUpdateFn) {
- RegionChangesCheckerInfo info = {checkfn, wantUpdateFn};
- RegionChangesCheckers.push_back(info);
+void CheckerManager::_registerForRegionChanges(CheckRegionChangesFunc checkfn) {
+ RegionChangesCheckers.push_back(checkfn);
}
void CheckerManager::_registerForPointerEscape(CheckPointerEscapeFunc checkfn){
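
With the wantsRegionChangeUpdate query gone, a subscriber now implements only the single callback; a minimal sketch, assuming the check::RegionChanges signature matches the call site above and the usual checker headers are in scope:

    class ExampleChecker : public Checker<check::RegionChanges> {
    public:
      ProgramStateRef
      checkRegionChanges(ProgramStateRef State,
                         const InvalidatedSymbols *Invalidated,
                         ArrayRef<const MemRegion *> ExplicitRegions,
                         ArrayRef<const MemRegion *> Regions,
                         const CallEvent *Call) const {
        return State; // inspect or update the state; unchanged means no-op
      }
    };
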
diff --git a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
index ba03e2f8a3c1..c9cb189a5b72 100644
--- a/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
+++ b/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -175,3 +175,22 @@ void CheckerRegistry::printHelp(raw_ostream &out,
out << '\n';
}
}
+
+void CheckerRegistry::printList(
+ raw_ostream &out, SmallVectorImpl<CheckerOptInfo> &opts) const {
+ std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+
+ // Collect checkers enabled by the options.
+ CheckerInfoSet enabledCheckers;
+ for (SmallVectorImpl<CheckerOptInfo>::iterator i = opts.begin(),
+ e = opts.end();
+ i != e; ++i) {
+ collectCheckers(Checkers, Packages, *i, enabledCheckers);
+ }
+
+ for (CheckerInfoSet::const_iterator i = enabledCheckers.begin(),
+ e = enabledCheckers.end();
+ i != e; ++i) {
+ out << (*i)->FullName << '\n';
+ }
+}
diff --git a/lib/StaticAnalyzer/Core/CoreEngine.cpp b/lib/StaticAnalyzer/Core/CoreEngine.cpp
index da608f6c7558..4e2866c56f0e 100644
--- a/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -18,7 +18,6 @@
#include "clang/AST/StmtCXX.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Casting.h"
@@ -310,8 +309,19 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
assert (L.getLocationContext()->getCFG()->getExit().size() == 0
&& "EXIT block cannot contain Stmts.");
+ // Get the return statement, if any.
+ const ReturnStmt *RS = nullptr;
+ if (!L.getSrc()->empty()) {
+ if (Optional<CFGStmt> LastStmt = L.getSrc()->back().getAs<CFGStmt>()) {
+ if ((RS = dyn_cast<ReturnStmt>(LastStmt->getStmt()))) {
+ if (!RS->getRetValue())
+ RS = nullptr;
+ }
+ }
+ }
+
// Process the final state transition.
- SubEng.processEndOfFunction(BuilderCtx, Pred);
+ SubEng.processEndOfFunction(BuilderCtx, Pred, RS);
// This path is done. Don't enqueue any more nodes.
return;
@@ -590,13 +600,14 @@ void CoreEngine::enqueueStmtNode(ExplodedNode *N,
WList->enqueue(Succ, Block, Idx+1);
}
-ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N) {
+ExplodedNode *CoreEngine::generateCallExitBeginNode(ExplodedNode *N,
+ const ReturnStmt *RS) {
// Create a CallExitBegin node and enqueue it.
const StackFrameContext *LocCtx
= cast<StackFrameContext>(N->getLocationContext());
// Use the callee location context.
- CallExitBegin Loc(LocCtx);
+ CallExitBegin Loc(LocCtx, RS);
bool isNew;
ExplodedNode *Node = G.getNode(Loc, N->getState(), false, &isNew);
@@ -620,12 +631,12 @@ void CoreEngine::enqueue(ExplodedNodeSet &Set,
}
}
-void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set) {
+void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set, const ReturnStmt *RS) {
for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
ExplodedNode *N = *I;
// If we are in an inlined call, generate CallExitBegin node.
if (N->getLocationContext()->getParent()) {
- N = generateCallExitBeginNode(N);
+ N = generateCallExitBeginNode(N, RS);
if (N)
WList->enqueue(N);
} else {
diff --git a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
index 02d382cc4885..3bc8e09333b9 100644
--- a/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
+++ b/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -17,11 +17,9 @@
#include "clang/AST/Stmt.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include <vector>
using namespace clang;
using namespace ento;
diff --git a/lib/StaticAnalyzer/Core/ExprEngine.cpp b/lib/StaticAnalyzer/Core/ExprEngine.cpp
index 405aecdee032..5b2119aeda27 100644
--- a/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -27,10 +27,9 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h"
-#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
#ifndef NDEBUG
#include "llvm/Support/GraphWriter.h"
@@ -203,25 +202,32 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
MemRegionManager &MRMgr = StateMgr.getRegionManager();
StoreManager &StoreMgr = StateMgr.getStoreManager();
- // We need to be careful about treating a derived type's value as
- // bindings for a base type. Unless we're creating a temporary pointer region,
- // start by stripping and recording base casts.
- SmallVector<const CastExpr *, 4> Casts;
- const Expr *Inner = Ex->IgnoreParens();
- if (!Loc::isLocType(Result->getType())) {
- while (const CastExpr *CE = dyn_cast<CastExpr>(Inner)) {
- if (CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase)
- Casts.push_back(CE);
- else if (CE->getCastKind() != CK_NoOp)
- break;
+ // MaterializeTemporaryExpr may appear out of place, after a few field and
+ // base-class accesses have been made to the object, even though semantically
+ // it is the whole object that gets materialized and lifetime-extended.
+ //
+ // For example:
+ //
+ // `-MaterializeTemporaryExpr
+ // `-MemberExpr
+ // `-CXXTemporaryObjectExpr
+ //
+ // instead of the more natural
+ //
+ // `-MemberExpr
+ // `-MaterializeTemporaryExpr
+ // `-CXXTemporaryObjectExpr
+ //
+ // Use the usual machinery for obtaining the expression of the base object,
+ // and record the adjustments that we need to make to obtain the sub-object
+ // that the whole expression 'Ex' refers to. This approach is customary:
+ // CodeGen takes a similar route.
- Inner = CE->getSubExpr()->IgnoreParens();
- }
- }
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+
+ const Expr *Init = Ex->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
- // Create a temporary object region for the inner expression (which may have
- // a more derived type) and bind the value into it.
const TypedValueRegion *TR = nullptr;
if (const MaterializeTemporaryExpr *MT =
dyn_cast<MaterializeTemporaryExpr>(Result)) {
@@ -229,25 +235,37 @@ ExprEngine::createTemporaryRegionIfNeeded(ProgramStateRef State,
// If this object is bound to a reference with static storage duration, we
// put it in a different region to prevent "address leakage" warnings.
if (SD == SD_Static || SD == SD_Thread)
- TR = MRMgr.getCXXStaticTempObjectRegion(Inner);
+ TR = MRMgr.getCXXStaticTempObjectRegion(Init);
}
if (!TR)
- TR = MRMgr.getCXXTempObjectRegion(Inner, LC);
+ TR = MRMgr.getCXXTempObjectRegion(Init, LC);
SVal Reg = loc::MemRegionVal(TR);
+ // Make the necessary adjustments to obtain the sub-object.
+ for (auto I = Adjustments.rbegin(), E = Adjustments.rend(); I != E; ++I) {
+ const SubobjectAdjustment &Adj = *I;
+ switch (Adj.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Reg = StoreMgr.evalDerivedToBase(Reg, Adj.DerivedToBase.BasePath);
+ break;
+ case SubobjectAdjustment::FieldAdjustment:
+ Reg = StoreMgr.getLValueField(Adj.Field, Reg);
+ break;
+ case SubobjectAdjustment::MemberPointerAdjustment:
+ // FIXME: Unimplemented.
+ State->bindDefault(Reg, UnknownVal());
+ return State;
+ }
+ }
+
+ // Try to recover some path sensitivity in case we couldn't compute the value.
if (V.isUnknown())
V = getSValBuilder().conjureSymbolVal(Result, LC, TR->getValueType(),
currBldrCtx->blockCount());
+ // Bind the value of the expression to the sub-object region, and then bind
+ // the sub-object region to our expression.
State = State->bindLoc(Reg, V);
-
- // Re-apply the casts (from innermost to outermost) for type sanity.
- for (SmallVectorImpl<const CastExpr *>::reverse_iterator I = Casts.rbegin(),
- E = Casts.rend();
- I != E; ++I) {
- Reg = StoreMgr.evalDerivedToBase(Reg, *I);
- }
-
State = State->BindExpr(Result, LC, Reg);
return State;
}
@@ -263,10 +281,6 @@ ProgramStateRef ExprEngine::processAssume(ProgramStateRef state,
return getCheckerManager().runCheckersForEvalAssume(state, cond, assumption);
}
-bool ExprEngine::wantsRegionChangeUpdate(ProgramStateRef state) {
- return getCheckerManager().wantsRegionChangeUpdate(state);
-}
-
ProgramStateRef
ExprEngine::processRegionChanges(ProgramStateRef state,
const InvalidatedSymbols *invalidated,
@@ -493,7 +507,7 @@ void ExprEngine::ProcessInitializer(const CFGInitializer Init,
}
SVal InitVal;
- if (BMI->getNumArrayIndices() > 0) {
+ if (Init->getType()->isArrayType()) {
// Handle arrays of trivial type. We can represent this with a
// primitive load/copy from the base array region.
const ArraySubscriptExpr *ASE;
@@ -597,9 +611,9 @@ void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
SVal dest = state->getLValue(varDecl, Pred->getLocationContext());
const MemRegion *Region = dest.castAs<loc::MemRegionVal>().getRegion();
- if (const ReferenceType *refType = varType->getAs<ReferenceType>()) {
- varType = refType->getPointeeType();
- Region = state->getSVal(Region).getAsRegion();
+ if (varType->isReferenceType()) {
+ Region = state->getSVal(Region).getAsRegion()->getBaseRegion();
+ varType = cast<TypedValueRegion>(Region)->getValueType();
}
VisitCXXDestructor(varType, Region, Dtor.getTriggerStmt(), /*IsBase=*/ false,
@@ -847,6 +861,14 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPDistributeParallelForSimdDirectiveClass:
case Stmt::OMPDistributeSimdDirectiveClass:
case Stmt::OMPTargetParallelForSimdDirectiveClass:
+ case Stmt::OMPTargetSimdDirectiveClass:
+ case Stmt::OMPTeamsDistributeDirectiveClass:
+ case Stmt::OMPTeamsDistributeSimdDirectiveClass:
+ case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
+ case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
+ case Stmt::OMPTargetTeamsDirectiveClass:
+ case Stmt::OMPTargetTeamsDistributeDirectiveClass:
+ case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
case Stmt::ObjCSubscriptRefExprClass:
@@ -886,6 +908,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::DesignatedInitUpdateExprClass:
+ case Stmt::ArrayInitLoopExprClass:
+ case Stmt::ArrayInitIndexExprClass:
case Stmt::ExtVectorElementExprClass:
case Stmt::ImaginaryLiteralClass:
case Stmt::ObjCAtCatchStmtClass:
@@ -1211,16 +1235,8 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::ObjCBridgedCastExprClass: {
Bldr.takeNodes(Pred);
const CastExpr *C = cast<CastExpr>(S);
- // Handle the previsit checks.
- ExplodedNodeSet dstPrevisit;
- getCheckerManager().runCheckersForPreStmt(dstPrevisit, Pred, C, *this);
-
- // Handle the expression itself.
ExplodedNodeSet dstExpr;
- for (ExplodedNodeSet::iterator i = dstPrevisit.begin(),
- e = dstPrevisit.end(); i != e ; ++i) {
- VisitCast(C, C->getSubExpr(), *i, dstExpr);
- }
+ VisitCast(C, C->getSubExpr(), Pred, dstExpr);
// Handle the postvisit checks.
getCheckerManager().runCheckersForPostStmt(Dst, dstExpr, C, *this);
@@ -1773,7 +1789,8 @@ void ExprEngine::processBeginOfFunction(NodeBuilderContext &BC,
/// ProcessEndPath - Called by CoreEngine. Used to generate end-of-path
/// nodes when the control reaches the end of a function.
void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
- ExplodedNode *Pred) {
+ ExplodedNode *Pred,
+ const ReturnStmt *RS) {
// FIXME: Assert that stackFrameDoesNotContainInitializedTemporaries(*Pred)).
// We currently cannot enable this assert, as lifetime extended temporaries
// are not modelled correctly.
@@ -1795,7 +1812,7 @@ void ExprEngine::processEndOfFunction(NodeBuilderContext& BC,
getCheckerManager().runCheckersForEndFunction(BC, Dst, Pred, *this);
}
- Engine.enqueueEndOfFunction(Dst);
+ Engine.enqueueEndOfFunction(Dst, RS);
}
/// ProcessSwitch - Called by CoreEngine. Used to generate successor
@@ -1841,7 +1858,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
ProgramStateRef StateCase;
if (Optional<NonLoc> NL = CondV.getAs<NonLoc>())
std::tie(StateCase, DefaultSt) =
- DefaultSt->assumeWithinInclusiveRange(*NL, V1, V2);
+ DefaultSt->assumeInclusiveRange(*NL, V1, V2);
else // UnknownVal
StateCase = DefaultSt;
@@ -1975,24 +1992,26 @@ void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
const Expr *Base = A->getBase()->IgnoreParens();
const Expr *Idx = A->getIdx()->IgnoreParens();
- ExplodedNodeSet checkerPreStmt;
- getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this);
+ ExplodedNodeSet CheckerPreStmt;
+ getCheckerManager().runCheckersForPreStmt(CheckerPreStmt, Pred, A, *this);
- StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currBldrCtx);
+ ExplodedNodeSet EvalSet;
+ StmtNodeBuilder Bldr(CheckerPreStmt, EvalSet, *currBldrCtx);
assert(A->isGLValue() ||
(!AMgr.getLangOpts().CPlusPlus &&
A->getType().isCForbiddenLValueType()));
- for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(),
- ei = checkerPreStmt.end(); it != ei; ++it) {
- const LocationContext *LCtx = (*it)->getLocationContext();
- ProgramStateRef state = (*it)->getState();
+ for (auto *Node : CheckerPreStmt) {
+ const LocationContext *LCtx = Node->getLocationContext();
+ ProgramStateRef state = Node->getState();
SVal V = state->getLValue(A->getType(),
state->getSVal(Idx, LCtx),
state->getSVal(Base, LCtx));
- Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V), nullptr,
+ Bldr.generateNode(A, Node, state->BindExpr(A, LCtx, V), nullptr,
ProgramPoint::PostLValueKind);
}
+
+ getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, A, *this);
}
/// VisitMemberExpr - Transfer function for member expressions.
@@ -2051,7 +2070,7 @@ void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
if (!M->isGLValue()) {
assert(M->getType()->isArrayType());
const ImplicitCastExpr *PE =
- dyn_cast<ImplicitCastExpr>((*I)->getParentMap().getParent(M));
+ dyn_cast<ImplicitCastExpr>((*I)->getParentMap().getParentIgnoreParens(M));
if (!PE || PE->getCastKind() != CK_ArrayToPointerDecay) {
llvm_unreachable("should always be wrapped in ArrayToPointerDecay");
}
@@ -2521,26 +2540,10 @@ struct DOTGraphTraits<ExplodedNode*> :
// FIXME: Since we do not cache error nodes in ExprEngine now, this does not
// work.
static std::string getNodeAttributes(const ExplodedNode *N, void*) {
-
-#if 0
- // FIXME: Replace with a general scheme to tell if the node is
- // an error node.
- if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
- GraphPrintCheckerState->isExplicitNullDeref(N) ||
- GraphPrintCheckerState->isUndefDeref(N) ||
- GraphPrintCheckerState->isUndefStore(N) ||
- GraphPrintCheckerState->isUndefControlFlow(N) ||
- GraphPrintCheckerState->isUndefResult(N) ||
- GraphPrintCheckerState->isBadCall(N) ||
- GraphPrintCheckerState->isUndefArg(N))
- return "color=\"red\",style=\"filled\"";
-
- if (GraphPrintCheckerState->isNoReturnCall(N))
- return "color=\"blue\",style=\"filled\"";
-#endif
return "";
}
+ // De-duplicate some source location pretty-printing.
static void printLocation(raw_ostream &Out, SourceLocation SLoc) {
if (SLoc.isFileID()) {
Out << "\\lline="
@@ -2550,6 +2553,12 @@ struct DOTGraphTraits<ExplodedNode*> :
<< "\\l";
}
}
+ static void printLocation2(raw_ostream &Out, SourceLocation SLoc) {
+ if (SLoc.isFileID() && GraphPrintSourceManager->isInMainFile(SLoc))
+ Out << "line " << GraphPrintSourceManager->getExpansionLineNumber(SLoc);
+ else
+ SLoc.print(Out, *GraphPrintSourceManager);
+ }
static std::string getNodeLabel(const ExplodedNode *N, void*){
@@ -2563,12 +2572,6 @@ struct DOTGraphTraits<ExplodedNode*> :
case ProgramPoint::BlockEntranceKind: {
Out << "Block Entrance: B"
<< Loc.castAs<BlockEntrance>().getBlock()->getBlockID();
- if (const NamedDecl *ND =
- dyn_cast<NamedDecl>(Loc.getLocationContext()->getDecl())) {
- Out << " (";
- ND->printName(Out);
- Out << ")";
- }
break;
}
@@ -2693,13 +2696,6 @@ struct DOTGraphTraits<ExplodedNode*> :
Out << "\\l";
}
-#if 0
- // FIXME: Replace with a general scheme to determine
- // the name of the check.
- if (GraphPrintCheckerState->isUndefControlFlow(N)) {
- Out << "\\|Control-flow based on\\lUndefined value.\\l";
- }
-#endif
break;
}
@@ -2721,27 +2717,6 @@ struct DOTGraphTraits<ExplodedNode*> :
else if (Loc.getAs<PostLValue>())
Out << "\\lPostLValue\\l";
-#if 0
- // FIXME: Replace with a general scheme to determine
- // the name of the check.
- if (GraphPrintCheckerState->isImplicitNullDeref(N))
- Out << "\\|Implicit-Null Dereference.\\l";
- else if (GraphPrintCheckerState->isExplicitNullDeref(N))
- Out << "\\|Explicit-Null Dereference.\\l";
- else if (GraphPrintCheckerState->isUndefDeref(N))
- Out << "\\|Dereference of undefialied value.\\l";
- else if (GraphPrintCheckerState->isUndefStore(N))
- Out << "\\|Store to Undefined Loc.";
- else if (GraphPrintCheckerState->isUndefResult(N))
- Out << "\\|Result of operation is undefined.";
- else if (GraphPrintCheckerState->isNoReturnCall(N))
- Out << "\\|Call to function marked \"noreturn\".";
- else if (GraphPrintCheckerState->isBadCall(N))
- Out << "\\|Call to NULL/Undefined.";
- else if (GraphPrintCheckerState->isUndefArg(N))
- Out << "\\|Argument in call is undefined";
-#endif
-
break;
}
}
@@ -2749,6 +2724,40 @@ struct DOTGraphTraits<ExplodedNode*> :
ProgramStateRef state = N->getState();
Out << "\\|StateID: " << (const void*) state.get()
<< " NodeID: " << (const void*) N << "\\|";
+
+ // Analysis stack backtrace.
+ Out << "Location context stack (from current to outer):\\l";
+ const LocationContext *LC = Loc.getLocationContext();
+ unsigned Idx = 0;
+ for (; LC; LC = LC->getParent(), ++Idx) {
+ Out << Idx << ". (" << (const void *)LC << ") ";
+ switch (LC->getKind()) {
+ case LocationContext::StackFrame:
+ if (const NamedDecl *D = dyn_cast<NamedDecl>(LC->getDecl()))
+ Out << "Calling " << D->getQualifiedNameAsString();
+ else
+ Out << "Calling anonymous code";
+ if (const Stmt *S = cast<StackFrameContext>(LC)->getCallSite()) {
+ Out << " at ";
+ printLocation2(Out, S->getLocStart());
+ }
+ break;
+ case LocationContext::Block:
+ Out << "Invoking block";
+ if (const Decl *D = cast<BlockInvocationContext>(LC)->getBlockDecl()) {
+ Out << " defined at ";
+ printLocation2(Out, D->getLocStart());
+ }
+ break;
+ case LocationContext::Scope:
+ Out << "Entering scope";
+ // FIXME: Add more info once ScopeContext is activated.
+ break;
+ }
+ Out << "\\l";
+ }
+ Out << "\\l";
+
state->printDOT(Out);
Out << "\\l";
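
The AST shape described in the comment arises, for example, when a member of a lifetime-extended temporary is bound to a reference (illustrative types):

    struct Pair { int first, second; };
    Pair make() { return {1, 2}; }

    void use() {
      // MaterializeTemporaryExpr wraps the MemberExpr here, so the whole
      // Pair is materialized and then adjusted down to '.second' via a
      // FieldAdjustment when the region is bound.
      const int &r = make().second;
      (void)r;
    }
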
diff --git a/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 175225ba0de2..89fab1d56af0 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
@@ -246,6 +247,38 @@ void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this);
}
+ProgramStateRef ExprEngine::handleLValueBitCast(
+ ProgramStateRef state, const Expr* Ex, const LocationContext* LCtx,
+ QualType T, QualType ExTy, const CastExpr* CastE, StmtNodeBuilder& Bldr,
+ ExplodedNode* Pred) {
+ // Delegate to SValBuilder to process.
+ SVal V = state->getSVal(Ex, LCtx);
+ V = svalBuilder.evalCast(V, T, ExTy);
+ // Negate the result if we're treating the boolean as a signed i1
+ if (CastE->getCastKind() == CK_BooleanToSignedIntegral)
+ V = evalMinus(V);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+
+ return state;
+}
+
+ProgramStateRef ExprEngine::handleLVectorSplat(
+ ProgramStateRef state, const LocationContext* LCtx, const CastExpr* CastE,
+ StmtNodeBuilder &Bldr, ExplodedNode* Pred) {
+ // Recover some path sensitivity by conjuring a new value.
+ QualType resultType = CastE->getType();
+ if (CastE->isGLValue())
+ resultType = getContext().getPointerType(resultType);
+ SVal result = svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx,
+ resultType,
+ currBldrCtx->blockCount());
+ state = state->BindExpr(CastE, LCtx, result);
+ Bldr.generateNode(CastE, Pred, state);
+
+ return state;
+}
+
void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
@@ -310,8 +343,21 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
continue;
}
case CK_MemberPointerToBoolean:
- // FIXME: For now, member pointers are represented by void *.
- // FALLTHROUGH
+ case CK_PointerToBoolean: {
+ SVal V = state->getSVal(Ex, LCtx);
+ auto PTMSV = V.getAs<nonloc::PointerToMember>();
+ if (PTMSV)
+ V = svalBuilder.makeTruthVal(!PTMSV->isNullMemberPointer(), ExTy);
+ if (V.isUndef() || PTMSV) {
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ // Explicitly proceed with default handler for this case cascade.
+ state =
+ handleLValueBitCast(state, Ex, LCtx, T, ExTy, CastE, Bldr, Pred);
+ continue;
+ }
case CK_Dependent:
case CK_ArrayToPointerDecay:
case CK_BitCast:
@@ -319,8 +365,18 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_BooleanToSignedIntegral:
case CK_NullToPointer:
case CK_IntegralToPointer:
- case CK_PointerToIntegral:
- case CK_PointerToBoolean:
+ case CK_PointerToIntegral: {
+ SVal V = state->getSVal(Ex, LCtx);
+ if (V.getAs<nonloc::PointerToMember>()) {
+ state = state->BindExpr(CastE, LCtx, UnknownVal());
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ // Explicitly proceed with default handler for this case cascade.
+ state =
+ handleLValueBitCast(state, Ex, LCtx, T, ExTy, CastE, Bldr, Pred);
+ continue;
+ }
case CK_IntegralToBoolean:
case CK_IntegralToFloating:
case CK_FloatingToIntegral:
@@ -341,15 +397,11 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_AnyPointerToBlockPointerCast:
case CK_ObjCObjectLValueCast:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
+ case CK_IntToOCLSampler:
case CK_LValueBitCast: {
- // Delegate to SValBuilder to process.
- SVal V = state->getSVal(Ex, LCtx);
- V = svalBuilder.evalCast(V, T, ExTy);
- // Negate the result if we're treating the boolean as a signed i1
- if (CastE->getCastKind() == CK_BooleanToSignedIntegral)
- V = evalMinus(V);
- state = state->BindExpr(CastE, LCtx, V);
- Bldr.generateNode(CastE, Pred, state);
+ state =
+ handleLValueBitCast(state, Ex, LCtx, T, ExTy, CastE, Bldr, Pred);
continue;
}
case CK_IntegralCast: {
@@ -385,7 +437,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
Failed = true;
// Else, evaluate the cast.
else
- val = getStoreManager().evalDynamicCast(val, T, Failed);
+ val = getStoreManager().attemptDownCast(val, T, Failed);
if (Failed) {
if (T->isReferenceType()) {
@@ -411,29 +463,55 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
Bldr.generateNode(CastE, Pred, state);
continue;
}
+ case CK_BaseToDerived: {
+ SVal val = state->getSVal(Ex, LCtx);
+ QualType resultType = CastE->getType();
+ if (CastE->isGLValue())
+ resultType = getContext().getPointerType(resultType);
+
+ bool Failed = false;
+
+ if (!val.isConstant()) {
+ val = getStoreManager().attemptDownCast(val, T, Failed);
+ }
+
+ // If the cast failed or the result is unknown, fall back to a conservative value.
+ if (Failed || val.isUnknown()) {
+ val =
+ svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx, resultType,
+ currBldrCtx->blockCount());
+ }
+ state = state->BindExpr(CastE, LCtx, val);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
case CK_NullToMemberPointer: {
- // FIXME: For now, member pointers are represented by void *.
- SVal V = svalBuilder.makeNull();
+ SVal V = svalBuilder.getMemberPointer(nullptr);
state = state->BindExpr(CastE, LCtx, V);
Bldr.generateNode(CastE, Pred, state);
continue;
}
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_ReinterpretMemberPointer: {
+ SVal V = state->getSVal(Ex, LCtx);
+ if (auto PTMSV = V.getAs<nonloc::PointerToMember>()) {
+ SVal CastedPTMSV = svalBuilder.makePointerToMember(
+ getBasicVals().accumCXXBase(
+ llvm::make_range<CastExpr::path_const_iterator>(
+ CastE->path_begin(), CastE->path_end()), *PTMSV));
+ state = state->BindExpr(CastE, LCtx, CastedPTMSV);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+      // Explicitly proceed with the default handler for this case cascade.
+ state = handleLVectorSplat(state, LCtx, CastE, Bldr, Pred);
+ continue;
+ }
// Various C++ casts that are not handled yet.
case CK_ToUnion:
- case CK_BaseToDerived:
- case CK_BaseToDerivedMemberPointer:
- case CK_DerivedToBaseMemberPointer:
- case CK_ReinterpretMemberPointer:
case CK_VectorSplat: {
- // Recover some path-sensitivty by conjuring a new value.
- QualType resultType = CastE->getType();
- if (CastE->isGLValue())
- resultType = getContext().getPointerType(resultType);
- SVal result = svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx,
- resultType,
- currBldrCtx->blockCount());
- state = state->BindExpr(CastE, LCtx, result);
- Bldr.generateNode(CastE, Pred, state);
+ state = handleLVectorSplat(state, LCtx, CastE, Bldr, Pred);
continue;
}
}
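
[Editor's note] A minimal sketch of what the new CK_BaseToDerived and member-pointer cast cases model (illustrative source; comments state the intended modeling, not guaranteed analyzer output):

    struct B { int f; };
    struct D : B {};

    int test() {
      int B::*pb = &B::f;
      int D::*pd = pb;  // CK_BaseToDerivedMemberPointer: the base path {B}
                        // is accumulated via accumCXXBase instead of
                        // conjuring an opaque symbol
      D d;
      d.f = 42;
      return d.*pd;     // with the path tracked, the access can resolve to 42
    }
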
@@ -458,15 +536,7 @@ void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
Loc CLLoc = State->getLValue(CL, LCtx);
State = State->bindLoc(CLLoc, V);
- // Compound literal expressions are a GNU extension in C++.
- // Unlike in C, where CLs are lvalues, in C++ CLs are prvalues,
- // and like temporary objects created by the functional notation T()
- // CLs are destroyed at the end of the containing full-expression.
- // HOWEVER, an rvalue of array type is not something the analyzer can
- // reason about, since we expect all regions to be wrapped in Locs.
- // So we treat array CLs as lvalues as well, knowing that they will decay
- // to pointers as soon as they are used.
- if (CL->isGLValue() || CL->getType()->isArrayType())
+ if (CL->isGLValue())
V = CLLoc;
}
@@ -596,23 +666,13 @@ void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
if (RHSVal.isUndef()) {
X = RHSVal;
} else {
- DefinedOrUnknownSVal DefinedRHS = RHSVal.castAs<DefinedOrUnknownSVal>();
- ProgramStateRef StTrue, StFalse;
- std::tie(StTrue, StFalse) = N->getState()->assume(DefinedRHS);
- if (StTrue) {
- if (StFalse) {
- // We can't constrain the value to 0 or 1.
- // The best we can do is a cast.
- X = getSValBuilder().evalCast(RHSVal, B->getType(), RHS->getType());
- } else {
- // The value is known to be true.
- X = getSValBuilder().makeIntVal(1, B->getType());
- }
- } else {
- // The value is known to be false.
- assert(StFalse && "Infeasible path!");
- X = getSValBuilder().makeIntVal(0, B->getType());
- }
+      // We evaluate the expression "RHSVal != 0", which results in 0 if the
+      // value is known to be false, 1 if it is known to be true, and a new
+      // symbol when the assumption is unknown.
+ nonloc::ConcreteInt Zero(getBasicVals().getValue(0, B->getType()));
+ X = evalBinOp(N->getState(), BO_NE,
+ svalBuilder.evalCast(RHSVal, B->getType(), RHS->getType()),
+ Zero, B->getType());
}
}
Bldr.generateNode(B, Pred, state->BindExpr(B, Pred->getLocationContext(), X));
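
[Editor's note] A worked sketch of the new evaluation (illustrative source):

    int test(int a, int b) {
      int r = a && b;
      // On the path where 'a' is true, 'r' is bound to evalBinOp(b != 0):
      //   0 when 'b' is known false, 1 when 'b' is known true, and the
      //   symbolic expression (b != 0) when nothing is known about 'b' --
      //   rather than splitting the state as the old assume-based code did.
      return r;
    }
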
@@ -644,7 +704,7 @@ void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
ei = IE->rend(); it != ei; ++it) {
SVal V = state->getSVal(cast<Expr>(*it), LCtx);
- vals = getBasicVals().consVals(V, vals);
+ vals = getBasicVals().prependSVal(V, vals);
}
B.generateNode(IE, Pred,
@@ -789,8 +849,24 @@ VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
}
-void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
- ExplodedNode *Pred,
+void ExprEngine::handleUOExtension(ExplodedNodeSet::iterator I,
+ const UnaryOperator *U,
+ StmtNodeBuilder &Bldr) {
+ // FIXME: We can probably just have some magic in Environment::getSVal()
+ // that propagates values, instead of creating a new node here.
+ //
+  // Unary "+" is a no-op, similar to parentheses. We still have places
+ // where it may be a block-level expression, so we need to
+ // generate an extra node that just propagates the value of the
+ // subexpression.
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ ProgramStateRef state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
+ state->getSVal(Ex, LCtx)));
+}
+
+void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
// FIXME: Prechecks eventually go in ::Visit().
ExplodedNodeSet CheckedSet;
@@ -842,24 +918,30 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
break;
}
+ case UO_AddrOf: {
+ // Process pointer-to-member address operation.
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex)) {
+ const ValueDecl *VD = DRE->getDecl();
+
+ if (isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD)) {
+ ProgramStateRef State = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ SVal SV = svalBuilder.getMemberPointer(cast<DeclaratorDecl>(VD));
+ Bldr.generateNode(U, *I, State->BindExpr(U, LCtx, SV));
+ break;
+ }
+ }
+      // Explicitly proceed with the default handler for this case cascade.
+ handleUOExtension(I, U, Bldr);
+ break;
+ }
case UO_Plus:
assert(!U->isGLValue());
// FALL-THROUGH.
case UO_Deref:
- case UO_AddrOf:
case UO_Extension: {
- // FIXME: We can probably just have some magic in Environment::getSVal()
- // that propagates values, instead of creating a new node here.
- //
- // Unary "+" is a no-op, similar to a parentheses. We still have places
- // where it may be a block-level expression, so we need to
- // generate an extra node that just propagates the value of the
- // subexpression.
- const Expr *Ex = U->getSubExpr()->IgnoreParens();
- ProgramStateRef state = (*I)->getState();
- const LocationContext *LCtx = (*I)->getLocationContext();
- Bldr.generateNode(U, *I, state->BindExpr(U, LCtx,
- state->getSVal(Ex, LCtx)));
+ handleUOExtension(I, U, Bldr);
break;
}
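
[Editor's note] A minimal source sketch for the new UO_AddrOf case (illustrative only):

    struct S {
      int x;
      void m() {}
    };

    void test() {
      int S::*pf = &S::x;       // address of a FieldDecl: produces a
                                // nonloc::PointerToMember via getMemberPointer()
      void (S::*pm)() = &S::m;  // address of a CXXMethodDecl, likewise
      (void)pf;
      (void)pm;
    }
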
diff --git a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index 556e2239abfb..7e9b2033ca37 100644
--- a/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -65,7 +65,7 @@ void ExprEngine::performTrivialCopy(NodeBuilder &Bldr, ExplodedNode *Pred,
if (Optional<Loc> L = V.getAs<Loc>())
V = Pred->getState()->getSVal(*L);
else
- assert(V.isUnknown());
+ assert(V.isUnknownOrUndef());
const Expr *CallExpr = Call.getOriginExpr();
evalBind(Dst, CallExpr, Pred, ThisVal, V, true);
@@ -346,6 +346,30 @@ void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *CE,
defaultEvalCall(Bldr, *I, *Call);
}
+  // If the CFG was constructed without elements for temporary destructors
+  // and the just-called constructor created a temporary object, then stop
+  // exploration if that temporary object has a noreturn destructor.
+  // This can lose coverage because the destructor, if it were present
+  // in the CFG, would be called at the end of the full expression or
+  // later (for lifetime-extended temporaries) -- but it avoids infeasible
+  // paths when noreturn temporary destructors are used for assertions.
+ const AnalysisDeclContext *ADC = LCtx->getAnalysisDeclContext();
+ if (!ADC->getCFGBuildOptions().AddTemporaryDtors) {
+ const MemRegion *Target = Call->getCXXThisVal().getAsRegion();
+ if (Target && isa<CXXTempObjectRegion>(Target) &&
+ Call->getDecl()->getParent()->isAnyDestructorNoReturn()) {
+
+ for (ExplodedNode *N : DstEvaluated) {
+ Bldr.generateSink(CE, N, N->getState());
+ }
+
+      // There is no need to run the PostCall and PostStmt checker
+      // callbacks because we just generated sinks on all nodes in the
+      // frontier.
+ return;
+ }
+ }
+
ExplodedNodeSet DstPostCall;
getCheckerManager().runCheckersForPostCall(DstPostCall, DstEvaluated,
*Call, *this);
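
[Editor's note] A sketch of the pattern this targets, assuming temporary-destructor CFG elements are disabled (illustrative source):

    #include <cstdlib>

    struct Fatal {
      // An assertion-style helper whose destructor never returns.
      __attribute__((noreturn)) ~Fatal() { std::abort(); }
    };

    int test(int *p) {
      if (!p)
        Fatal();  // temporary with a noreturn destructor: the path is sunk
      return *p;  // so no spurious null-dereference report is emitted here
    }
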
@@ -578,9 +602,9 @@ void ExprEngine::VisitLambdaExpr(const LambdaExpr *LE, ExplodedNode *Pred,
const MemRegion *R = svalBuilder.getRegionManager().getCXXTempObjectRegion(
LE, LocCtxt);
SVal V = loc::MemRegionVal(R);
-
+
ProgramStateRef State = Pred->getState();
-
+
// If we created a new MemRegion for the lambda, we should explicitly bind
// the captures.
CXXRecordDecl::field_iterator CurField = LE->getLambdaClass()->field_begin();
diff --git a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 3a18956e4139..f157c3dd6ce2 100644
--- a/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -152,13 +152,30 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
}
// Process the path.
- unsigned n = path.size();
- unsigned max = n;
-
- for (PathPieces::const_reverse_iterator I = path.rbegin(),
- E = path.rend();
- I != E; ++I, --n)
- HandlePiece(R, FID, **I, n, max);
+ // Maintain the counts of extra note pieces separately.
+ unsigned TotalPieces = path.size();
+ unsigned TotalNotePieces =
+ std::count_if(path.begin(), path.end(),
+ [](const IntrusiveRefCntPtr<PathDiagnosticPiece> &p) {
+ return isa<PathDiagnosticNotePiece>(p.get());
+ });
+
+ unsigned TotalRegularPieces = TotalPieces - TotalNotePieces;
+ unsigned NumRegularPieces = TotalRegularPieces;
+ unsigned NumNotePieces = TotalNotePieces;
+
+ for (auto I = path.rbegin(), E = path.rend(); I != E; ++I) {
+ if (isa<PathDiagnosticNotePiece>(I->get())) {
+ // This adds diagnostic bubbles, but not navigation.
+ // Navigation through note pieces would be added later,
+ // as a separate pass through the piece list.
+ HandlePiece(R, FID, **I, NumNotePieces, TotalNotePieces);
+ --NumNotePieces;
+ } else {
+ HandlePiece(R, FID, **I, NumRegularPieces, TotalRegularPieces);
+ --NumRegularPieces;
+ }
+ }
// Add line numbers, header, footer, etc.
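
[Editor's note] A standalone mirror of the counting scheme above (assumption: 'true' marks a note piece); regular and note pieces are numbered independently, 1-based within their own kind:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<bool> IsNote = {false, true, false, true, false};
      unsigned Total = IsNote.size();
      unsigned TotalNotes = 0;
      for (bool N : IsNote)
        TotalNotes += N;
      unsigned NumRegular = Total - TotalNotes, NumNotes = TotalNotes;
      // Walk in reverse, as ReportDiag does, counting each kind down.
      for (auto It = IsNote.rbegin(); It != IsNote.rend(); ++It) {
        if (*It)
          std::printf("Note %u of %u\n", NumNotes--, TotalNotes);
        else
          std::printf("Piece %u of %u\n", NumRegular--, Total - TotalNotes);
      }
    }
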
@@ -192,24 +209,38 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
int ColumnNumber = path.back()->getLocation().asLocation().getExpansionColumnNumber();
// Add the name of the file as an <h1> tag.
-
{
std::string s;
llvm::raw_string_ostream os(s);
os << "<!-- REPORTHEADER -->\n"
- << "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
+ << "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
"<tr><td class=\"rowname\">File:</td><td>"
- << html::EscapeText(DirName)
- << html::EscapeText(Entry->getName())
- << "</td></tr>\n<tr><td class=\"rowname\">Location:</td><td>"
- "<a href=\"#EndPath\">line "
- << LineNumber
- << ", column "
- << ColumnNumber
- << "</a></td></tr>\n"
- "<tr><td class=\"rowname\">Description:</td><td>"
- << D.getVerboseDescription() << "</td></tr>\n";
+ << html::EscapeText(DirName)
+ << html::EscapeText(Entry->getName())
+ << "</td></tr>\n<tr><td class=\"rowname\">Warning:</td><td>"
+ "<a href=\"#EndPath\">line "
+ << LineNumber
+ << ", column "
+ << ColumnNumber
+ << "</a><br />"
+ << D.getVerboseDescription() << "</td></tr>\n";
+
+      // Navigation across the extra note pieces.
+ unsigned NumExtraPieces = 0;
+ for (const auto &Piece : path) {
+ if (const auto *P = dyn_cast<PathDiagnosticNotePiece>(Piece.get())) {
+ int LineNumber =
+ P->getLocation().asLocation().getExpansionLineNumber();
+ int ColumnNumber =
+ P->getLocation().asLocation().getExpansionColumnNumber();
+ os << "<tr><td class=\"rowname\">Note:</td><td>"
+ << "<a href=\"#Note" << NumExtraPieces << "\">line "
+ << LineNumber << ", column " << ColumnNumber << "</a><br />"
+ << P->getString() << "</td></tr>";
+ ++NumExtraPieces;
+ }
+ }
// Output any other meta data.
@@ -385,13 +416,20 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
// Create the html for the message.
const char *Kind = nullptr;
+ bool IsNote = false;
+ bool SuppressIndex = (max == 1);
switch (P.getKind()) {
case PathDiagnosticPiece::Call:
- llvm_unreachable("Calls should already be handled");
+ llvm_unreachable("Calls and extra notes should already be handled");
case PathDiagnosticPiece::Event: Kind = "Event"; break;
case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
// Setting Kind to "Control" is intentional.
case PathDiagnosticPiece::Macro: Kind = "Control"; break;
+ case PathDiagnosticPiece::Note:
+ Kind = "Note";
+ IsNote = true;
+ SuppressIndex = true;
+ break;
}
std::string sbuf;
@@ -399,7 +437,9 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
os << "\n<tr><td class=\"num\"></td><td class=\"line\"><div id=\"";
- if (num == max)
+ if (IsNote)
+ os << "Note" << num;
+ else if (num == max)
os << "EndPath";
else
os << "Path" << num;
@@ -461,7 +501,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
os << "\">";
- if (max > 1) {
+ if (!SuppressIndex) {
os << "<table class=\"msgT\"><tr><td valign=\"top\">";
os << "<div class=\"PathIndex";
if (Kind) os << " PathIndex" << Kind;
@@ -501,7 +541,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
os << "':\n";
- if (max > 1) {
+ if (!SuppressIndex) {
os << "</td>";
if (num < max) {
os << "<td><div class=\"PathNav\"><a href=\"#";
@@ -523,7 +563,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
else {
os << html::EscapeText(P.getString());
- if (max > 1) {
+ if (!SuppressIndex) {
os << "</td>";
if (num < max) {
os << "<td><div class=\"PathNav\"><a href=\"#";
diff --git a/lib/StaticAnalyzer/Core/IssueHash.cpp b/lib/StaticAnalyzer/Core/IssueHash.cpp
index bd5c81179adc..abdea88b1db6 100644
--- a/lib/StaticAnalyzer/Core/IssueHash.cpp
+++ b/lib/StaticAnalyzer/Core/IssueHash.cpp
@@ -13,7 +13,6 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/Lexer.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
diff --git a/lib/StaticAnalyzer/Core/MemRegion.cpp b/lib/StaticAnalyzer/Core/MemRegion.cpp
index b7b6f42b2910..c4ba2ae199f8 100644
--- a/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -31,28 +31,6 @@ using namespace ento;
// MemRegion Construction.
//===----------------------------------------------------------------------===//
-template<typename RegionTy> struct MemRegionManagerTrait;
-
-template <typename RegionTy, typename A1>
-RegionTy* MemRegionManager::getRegion(const A1 a1) {
- const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
- MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1);
-
- llvm::FoldingSetNodeID ID;
- RegionTy::ProfileRegion(ID, a1, superRegion);
- void *InsertPos;
- RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
- InsertPos));
-
- if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(a1, superRegion);
- Regions.InsertNode(R, InsertPos);
- }
-
- return R;
-}
-
template <typename RegionTy, typename A1>
RegionTy* MemRegionManager::getSubRegion(const A1 a1,
const MemRegion *superRegion) {
@@ -72,26 +50,6 @@ RegionTy* MemRegionManager::getSubRegion(const A1 a1,
}
template <typename RegionTy, typename A1, typename A2>
-RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) {
- const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
- MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2);
-
- llvm::FoldingSetNodeID ID;
- RegionTy::ProfileRegion(ID, a1, a2, superRegion);
- void *InsertPos;
- RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
- InsertPos));
-
- if (!R) {
- R = A.Allocate<RegionTy>();
- new (R) RegionTy(a1, a2, superRegion);
- Regions.InsertNode(R, InsertPos);
- }
-
- return R;
-}
-
-template <typename RegionTy, typename A1, typename A2>
RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2,
const MemRegion *superRegion) {
llvm::FoldingSetNodeID ID;
diff --git a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
index 217d628a129c..5675cb2026f0 100644
--- a/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
+++ b/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -60,6 +60,7 @@ PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
PathDiagnosticCallPiece::~PathDiagnosticCallPiece() {}
PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
+PathDiagnosticNotePiece::~PathDiagnosticNotePiece() {}
void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
bool ShouldFlattenMacros) const {
@@ -95,6 +96,7 @@ void PathPieces::flattenTo(PathPieces &Primary, PathPieces &Current,
}
case PathDiagnosticPiece::Event:
case PathDiagnosticPiece::ControlFlow:
+ case PathDiagnosticPiece::Note:
Current.push_back(Piece);
break;
}
@@ -211,6 +213,12 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
const SourceManager &SMgr = D->path.front()->getLocation().getManager();
SmallVector<const PathPieces *, 5> WorkList;
WorkList.push_back(&D->path);
+ SmallString<128> buf;
+ llvm::raw_svector_ostream warning(buf);
+ warning << "warning: Path diagnostic report is not generated. Current "
+ << "output format does not support diagnostics that cross file "
+ << "boundaries. Refer to --analyzer-output for valid output "
+ << "formats\n";
while (!WorkList.empty()) {
const PathPieces &path = *WorkList.pop_back_val();
@@ -222,19 +230,25 @@ void PathDiagnosticConsumer::HandlePathDiagnostic(
if (FID.isInvalid()) {
FID = SMgr.getFileID(L);
- } else if (SMgr.getFileID(L) != FID)
- return; // FIXME: Emit a warning?
+ } else if (SMgr.getFileID(L) != FID) {
+ llvm::errs() << warning.str();
+ return;
+ }
// Check the source ranges.
ArrayRef<SourceRange> Ranges = piece->getRanges();
for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
E = Ranges.end(); I != E; ++I) {
SourceLocation L = SMgr.getExpansionLoc(I->getBegin());
- if (!L.isFileID() || SMgr.getFileID(L) != FID)
- return; // FIXME: Emit a warning?
+ if (!L.isFileID() || SMgr.getFileID(L) != FID) {
+ llvm::errs() << warning.str();
+ return;
+ }
L = SMgr.getExpansionLoc(I->getEnd());
- if (!L.isFileID() || SMgr.getFileID(L) != FID)
- return; // FIXME: Emit a warning?
+ if (!L.isFileID() || SMgr.getFileID(L) != FID) {
+ llvm::errs() << warning.str();
+ return;
+ }
}
if (const PathDiagnosticCallPiece *call =
@@ -342,15 +356,16 @@ static Optional<bool> comparePiece(const PathDiagnosticPiece &X,
}
switch (X.getKind()) {
- case clang::ento::PathDiagnosticPiece::ControlFlow:
+ case PathDiagnosticPiece::ControlFlow:
return compareControlFlow(cast<PathDiagnosticControlFlowPiece>(X),
cast<PathDiagnosticControlFlowPiece>(Y));
- case clang::ento::PathDiagnosticPiece::Event:
+ case PathDiagnosticPiece::Event:
+ case PathDiagnosticPiece::Note:
return None;
- case clang::ento::PathDiagnosticPiece::Macro:
+ case PathDiagnosticPiece::Macro:
return compareMacro(cast<PathDiagnosticMacroPiece>(X),
cast<PathDiagnosticMacroPiece>(Y));
- case clang::ento::PathDiagnosticPiece::Call:
+ case PathDiagnosticPiece::Call:
return compareCall(cast<PathDiagnosticCallPiece>(X),
cast<PathDiagnosticCallPiece>(Y));
}
@@ -1098,6 +1113,10 @@ void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(**I);
}
+void PathDiagnosticNotePiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticSpotPiece::Profile(ID);
+}
+
void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(getLocation());
ID.AddString(BugType);
diff --git a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 8ad931acdf7f..c5263ee0e5ca 100644
--- a/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -19,7 +19,6 @@
#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
#include "clang/StaticAnalyzer/Core/IssueHash.h"
#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
using namespace clang;
@@ -282,6 +281,9 @@ static void ReportPiece(raw_ostream &o,
ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
indent, depth);
break;
+ case PathDiagnosticPiece::Note:
+ // FIXME: Extend the plist format to support those.
+ break;
}
}
@@ -298,40 +300,42 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
SM = &Diags.front()->path.front()->getLocation().getManager();
- for (std::vector<const PathDiagnostic*>::iterator DI = Diags.begin(),
- DE = Diags.end(); DI != DE; ++DI) {
+ auto AddPieceFID = [&FM, &Fids, SM](const PathDiagnosticPiece *Piece)->void {
+ AddFID(FM, Fids, *SM, Piece->getLocation().asLocation());
+ ArrayRef<SourceRange> Ranges = Piece->getRanges();
+ for (const SourceRange &Range : Ranges) {
+ AddFID(FM, Fids, *SM, Range.getBegin());
+ AddFID(FM, Fids, *SM, Range.getEnd());
+ }
+ };
- const PathDiagnostic *D = *DI;
+ for (const PathDiagnostic *D : Diags) {
SmallVector<const PathPieces *, 5> WorkList;
WorkList.push_back(&D->path);
while (!WorkList.empty()) {
- const PathPieces &path = *WorkList.pop_back_val();
-
- for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E;
- ++I) {
- const PathDiagnosticPiece *piece = I->get();
- AddFID(FM, Fids, *SM, piece->getLocation().asLocation());
- ArrayRef<SourceRange> Ranges = piece->getRanges();
- for (ArrayRef<SourceRange>::iterator I = Ranges.begin(),
- E = Ranges.end(); I != E; ++I) {
- AddFID(FM, Fids, *SM, I->getBegin());
- AddFID(FM, Fids, *SM, I->getEnd());
- }
+ const PathPieces &Path = *WorkList.pop_back_val();
+
+ for (const auto &Iter : Path) {
+ const PathDiagnosticPiece *Piece = Iter.get();
+ AddPieceFID(Piece);
+
+ if (const PathDiagnosticCallPiece *Call =
+ dyn_cast<PathDiagnosticCallPiece>(Piece)) {
+ if (IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+ CallEnterWithin = Call->getCallEnterWithinCallerEvent())
+ AddPieceFID(CallEnterWithin.get());
- if (const PathDiagnosticCallPiece *call =
- dyn_cast<PathDiagnosticCallPiece>(piece)) {
- IntrusiveRefCntPtr<PathDiagnosticEventPiece>
- callEnterWithin = call->getCallEnterWithinCallerEvent();
- if (callEnterWithin)
- AddFID(FM, Fids, *SM, callEnterWithin->getLocation().asLocation());
+ if (IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+ CallEnterEvent = Call->getCallEnterEvent())
+ AddPieceFID(CallEnterEvent.get());
- WorkList.push_back(&call->path);
+ WorkList.push_back(&Call->path);
}
- else if (const PathDiagnosticMacroPiece *macro =
- dyn_cast<PathDiagnosticMacroPiece>(piece)) {
- WorkList.push_back(&macro->subPieces);
+ else if (const PathDiagnosticMacroPiece *Macro =
+ dyn_cast<PathDiagnosticMacroPiece>(Piece)) {
+ WorkList.push_back(&Macro->subPieces);
}
}
}
diff --git a/lib/StaticAnalyzer/Core/ProgramState.cpp b/lib/StaticAnalyzer/Core/ProgramState.cpp
index adda7af08db8..03ace35965cb 100644
--- a/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -527,32 +527,17 @@ bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
}
bool ScanReachableSymbols::scan(const SymExpr *sym) {
- bool wasVisited = !visited.insert(sym).second;
- if (wasVisited)
- return true;
-
- if (!visitor.VisitSymbol(sym))
- return false;
+ for (SymExpr::symbol_iterator SI = sym->symbol_begin(),
+ SE = sym->symbol_end();
+ SI != SE; ++SI) {
+ bool wasVisited = !visited.insert(*SI).second;
+ if (wasVisited)
+ continue;
- // TODO: should be rewritten using SymExpr::symbol_iterator.
- switch (sym->getKind()) {
- case SymExpr::SymbolRegionValueKind:
- case SymExpr::SymbolConjuredKind:
- case SymExpr::SymbolDerivedKind:
- case SymExpr::SymbolExtentKind:
- case SymExpr::SymbolMetadataKind:
- break;
- case SymExpr::SymbolCastKind:
- return scan(cast<SymbolCast>(sym)->getOperand());
- case SymExpr::SymIntExprKind:
- return scan(cast<SymIntExpr>(sym)->getLHS());
- case SymExpr::IntSymExprKind:
- return scan(cast<IntSymExpr>(sym)->getRHS());
- case SymExpr::SymSymExprKind: {
- const SymSymExpr *x = cast<SymSymExpr>(sym);
- return scan(x->getLHS()) && scan(x->getRHS());
- }
+ if (!visitor.VisitSymbol(*SI))
+ return false;
}
+
return true;
}
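
[Editor's note] A hypothetical client of the same traversal, sketched under the assumption that only SymbolVisitor::VisitSymbol() is overridden (everything except SymbolVisitor/SymbolRef is illustrative):

    // Counts distinct symbols reachable from a symbolic expression; the
    // 'visited' set in the scan above guarantees VisitSymbol() fires once
    // per symbol, e.g. once each for $a, $b, $c and the intermediate
    // expression nodes of ($a + $b) == $c.
    class CountingVisitor : public SymbolVisitor {
      unsigned Count = 0;
    public:
      bool VisitSymbol(SymbolRef Sym) override {
        ++Count;      // one call per newly visited symbol
        return true;  // returning false would abort the scan early
      }
      unsigned getCount() const { return Count; }
    };
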
diff --git a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 77b0ad32b6b7..15073bb82b36 100644
--- a/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -18,7 +18,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
-#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -28,22 +27,17 @@ using namespace ento;
/// guarantee that from <= to. Note that Range is immutable, so as not
/// to subvert RangeSet's immutability.
namespace {
-class Range : public std::pair<const llvm::APSInt*,
- const llvm::APSInt*> {
+class Range : public std::pair<const llvm::APSInt *, const llvm::APSInt *> {
public:
Range(const llvm::APSInt &from, const llvm::APSInt &to)
- : std::pair<const llvm::APSInt*, const llvm::APSInt*>(&from, &to) {
+ : std::pair<const llvm::APSInt *, const llvm::APSInt *>(&from, &to) {
assert(from <= to);
}
bool Includes(const llvm::APSInt &v) const {
return *first <= v && v <= *second;
}
- const llvm::APSInt &From() const {
- return *first;
- }
- const llvm::APSInt &To() const {
- return *second;
- }
+ const llvm::APSInt &From() const { return *first; }
+ const llvm::APSInt &To() const { return *second; }
const llvm::APSInt *getConcreteValue() const {
return &From() == &To() ? &From() : nullptr;
}
@@ -54,7 +48,6 @@ public:
}
};
-
class RangeTrait : public llvm::ImutContainerInfo<Range> {
public:
// When comparing if one Range is less than another, we should compare
@@ -62,8 +55,8 @@ public:
// consistent (instead of comparing by pointer values) and can potentially
// be used to speed up some of the operations in RangeSet.
static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
- return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
- *lhs.second < *rhs.second);
+ return *lhs.first < *rhs.first ||
+ (!(*rhs.first < *lhs.first) && *lhs.second < *rhs.second);
}
};
@@ -97,7 +90,7 @@ public:
/// Construct a new RangeSet representing '{ [from, to] }'.
RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
- : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
+ : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
/// Profile - Generates a hash profile of this RangeSet for use
/// by FoldingSet.
@@ -106,16 +99,14 @@ public:
   /// getConcreteValue - If a symbol is constrained to equal a specific integer
/// constant then this method returns that value. Otherwise, it returns
/// NULL.
- const llvm::APSInt* getConcreteValue() const {
+ const llvm::APSInt *getConcreteValue() const {
return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : nullptr;
}
private:
void IntersectInRange(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Lower,
- const llvm::APSInt &Upper,
- PrimRangeSet &newRanges,
- PrimRangeSet::iterator &i,
+ const llvm::APSInt &Lower, const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges, PrimRangeSet::iterator &i,
PrimRangeSet::iterator &e) const {
// There are six cases for each range R in the set:
// 1. R is entirely before the intersection range.
@@ -135,8 +126,8 @@ private:
if (i->Includes(Lower)) {
if (i->Includes(Upper)) {
- newRanges = F.add(newRanges, Range(BV.getValue(Lower),
- BV.getValue(Upper)));
+ newRanges =
+ F.add(newRanges, Range(BV.getValue(Lower), BV.getValue(Upper)));
break;
} else
newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
@@ -244,8 +235,8 @@ public:
// range is taken to wrap around. This is equivalent to taking the
// intersection with the two ranges [Min, Upper] and [Lower, Max],
// or, alternatively, /removing/ all integers between Upper and Lower.
- RangeSet Intersect(BasicValueFactory &BV, Factory &F,
- llvm::APSInt Lower, llvm::APSInt Upper) const {
+ RangeSet Intersect(BasicValueFactory &BV, Factory &F, llvm::APSInt Lower,
+ llvm::APSInt Upper) const {
if (!pin(Lower, Upper))
return F.getEmptySet();
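
[Editor's note] A standalone illustration of the wraparound convention described above, with 8-bit unsigned bounds: excluding the single value 5 (as assumeSymNE would) yields Lower = 6, Upper = 4:

    #include <cstdio>

    int main() {
      const unsigned Min = 0, Max = 255;
      unsigned Lower = 6, Upper = 4;  // Lower > Upper: the range wraps
      if (Lower > Upper)
        std::printf("[%u, %u] U [%u, %u]\n", Min, Upper, Lower, Max);
      else
        std::printf("[%u, %u]\n", Lower, Upper);
    }
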
@@ -291,53 +282,54 @@ REGISTER_TRAIT_WITH_PROGRAMSTATE(ConstraintRange,
RangeSet))
namespace {
-class RangeConstraintManager : public SimpleConstraintManager{
- RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
+class RangeConstraintManager : public SimpleConstraintManager {
+ RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
+
public:
- RangeConstraintManager(SubEngine *subengine, SValBuilder &SVB)
- : SimpleConstraintManager(subengine, SVB) {}
+ RangeConstraintManager(SubEngine *SE, SValBuilder &SVB)
+ : SimpleConstraintManager(SE, SVB) {}
- ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) override;
+ ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
- ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) override;
+ ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
- ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) override;
+ ProgramStateRef assumeSymLT(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
- ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) override;
+ ProgramStateRef assumeSymGT(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
- ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) override;
+ ProgramStateRef assumeSymLE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
- ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& Int,
- const llvm::APSInt& Adjustment) override;
+ ProgramStateRef assumeSymGE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) override;
ProgramStateRef assumeSymbolWithinInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
ProgramStateRef assumeSymbolOutOfInclusiveRange(
- ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
- const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, const llvm::APSInt &Adjustment) override;
- const llvm::APSInt* getSymVal(ProgramStateRef St,
- SymbolRef sym) const override;
+ const llvm::APSInt *getSymVal(ProgramStateRef St,
+ SymbolRef Sym) const override;
ConditionTruthVal checkNull(ProgramStateRef State, SymbolRef Sym) override;
ProgramStateRef removeDeadBindings(ProgramStateRef St,
- SymbolReaper& SymReaper) override;
+ SymbolReaper &SymReaper) override;
- void print(ProgramStateRef St, raw_ostream &Out,
- const char* nl, const char *sep) override;
+ void print(ProgramStateRef St, raw_ostream &Out, const char *nl,
+ const char *sep) override;
private:
RangeSet::Factory F;
@@ -364,9 +356,9 @@ ento::CreateRangeConstraintManager(ProgramStateManager &StMgr, SubEngine *Eng) {
return llvm::make_unique<RangeConstraintManager>(Eng, StMgr.getSValBuilder());
}
-const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
- SymbolRef sym) const {
- const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym);
+const llvm::APSInt *RangeConstraintManager::getSymVal(ProgramStateRef St,
+ SymbolRef Sym) const {
+ const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(Sym);
return T ? T->getConcreteValue() : nullptr;
}
@@ -397,30 +389,32 @@ ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
/// Scan all symbols referenced by the constraints. If the symbol is not alive
/// as marked in LSymbols, mark it as dead in DSymbols.
ProgramStateRef
-RangeConstraintManager::removeDeadBindings(ProgramStateRef state,
- SymbolReaper& SymReaper) {
-
- ConstraintRangeTy CR = state->get<ConstraintRange>();
- ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>();
+RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
+ SymbolReaper &SymReaper) {
+ bool Changed = false;
+ ConstraintRangeTy CR = State->get<ConstraintRange>();
+ ConstraintRangeTy::Factory &CRFactory = State->get_context<ConstraintRange>();
for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
- SymbolRef sym = I.getKey();
- if (SymReaper.maybeDead(sym))
- CR = CRFactory.remove(CR, sym);
+ SymbolRef Sym = I.getKey();
+ if (SymReaper.maybeDead(Sym)) {
+ Changed = true;
+ CR = CRFactory.remove(CR, Sym);
+ }
}
- return state->set<ConstraintRange>(CR);
+ return Changed ? State->set<ConstraintRange>(CR) : State;
}
-RangeSet
-RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
- if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
+RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
+ SymbolRef Sym) {
+ if (ConstraintRangeTy::data_type *V = State->get<ConstraintRange>(Sym))
return *V;
// Lazily generate a new RangeSet representing all possible values for the
// given symbol type.
BasicValueFactory &BV = getBasicVals();
- QualType T = sym->getType();
+ QualType T = Sym->getType();
RangeSet Result(F, BV.getMinValue(T), BV.getMaxValue(T));
@@ -428,7 +422,7 @@ RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
if (T->isReferenceType()) {
APSIntType IntType = BV.getAPSIntType(T);
Result = Result.Intersect(BV, F, ++IntType.getZeroValue(),
- --IntType.getZeroValue());
+ --IntType.getZeroValue());
}
return Result;
@@ -462,7 +456,7 @@ RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
// [Int-Adjustment+1, Int-Adjustment-1]
// Notice that the lower bound is greater than the upper bound.
- RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, Upper, Lower);
+ RangeSet New = getRange(St, Sym).Intersect(getBasicVals(), F, Upper, Lower);
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
@@ -477,7 +471,7 @@ RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
// [Int-Adjustment, Int-Adjustment]
llvm::APSInt AdjInt = AdjustmentType.convert(Int) - Adjustment;
- RangeSet New = GetRange(St, Sym).Intersect(getBasicVals(), F, AdjInt, AdjInt);
+ RangeSet New = getRange(St, Sym).Intersect(getBasicVals(), F, AdjInt, AdjInt);
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
@@ -493,7 +487,7 @@ RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
- return GetRange(St, Sym);
+ return getRange(St, Sym);
}
// Special case for Int == Min. This is always false.
@@ -506,7 +500,7 @@ RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
llvm::APSInt Upper = ComparisonVal - Adjustment;
--Upper;
- return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return getRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
}
ProgramStateRef
@@ -517,15 +511,15 @@ RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
-RangeSet
-RangeConstraintManager::getSymGTRange(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet RangeConstraintManager::getSymGTRange(ProgramStateRef St,
+ SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
- return GetRange(St, Sym);
+ return getRange(St, Sym);
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
@@ -542,7 +536,7 @@ RangeConstraintManager::getSymGTRange(ProgramStateRef St, SymbolRef Sym,
llvm::APSInt Upper = Max - Adjustment;
++Lower;
- return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return getRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
}
ProgramStateRef
@@ -553,15 +547,15 @@ RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
-RangeSet
-RangeConstraintManager::getSymGERange(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet RangeConstraintManager::getSymGERange(ProgramStateRef St,
+ SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
case APSIntType::RTR_Below:
- return GetRange(St, Sym);
+ return getRange(St, Sym);
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
@@ -572,13 +566,13 @@ RangeConstraintManager::getSymGERange(ProgramStateRef St, SymbolRef Sym,
llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
llvm::APSInt Min = AdjustmentType.getMinValue();
if (ComparisonVal == Min)
- return GetRange(St, Sym);
+ return getRange(St, Sym);
llvm::APSInt Max = AdjustmentType.getMaxValue();
llvm::APSInt Lower = ComparisonVal - Adjustment;
llvm::APSInt Upper = Max - Adjustment;
- return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return getRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
}
ProgramStateRef
@@ -589,10 +583,9 @@ RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
-RangeSet
-RangeConstraintManager::getSymLERange(const RangeSet &RS,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet RangeConstraintManager::getSymLERange(const RangeSet &RS,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
@@ -617,10 +610,10 @@ RangeConstraintManager::getSymLERange(const RangeSet &RS,
return RS.Intersect(getBasicVals(), F, Lower, Upper);
}
-RangeSet
-RangeConstraintManager::getSymLERange(ProgramStateRef St, SymbolRef Sym,
- const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
+RangeSet RangeConstraintManager::getSymLERange(ProgramStateRef St,
+ SymbolRef Sym,
+ const llvm::APSInt &Int,
+ const llvm::APSInt &Adjustment) {
// Before we do any real work, see if the value can even show up.
APSIntType AdjustmentType(Adjustment);
switch (AdjustmentType.testInRange(Int, true)) {
@@ -629,20 +622,20 @@ RangeConstraintManager::getSymLERange(ProgramStateRef St, SymbolRef Sym,
case APSIntType::RTR_Within:
break;
case APSIntType::RTR_Above:
- return GetRange(St, Sym);
+ return getRange(St, Sym);
}
// Special case for Int == Max. This is always feasible.
llvm::APSInt ComparisonVal = AdjustmentType.convert(Int);
llvm::APSInt Max = AdjustmentType.getMaxValue();
if (ComparisonVal == Max)
- return GetRange(St, Sym);
+ return getRange(St, Sym);
llvm::APSInt Min = AdjustmentType.getMinValue();
llvm::APSInt Lower = Min - Adjustment;
llvm::APSInt Upper = ComparisonVal - Adjustment;
- return GetRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ return getRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
}
ProgramStateRef
@@ -653,8 +646,7 @@ RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
return New.isEmpty() ? nullptr : St->set<ConstraintRange>(Sym, New);
}
-ProgramStateRef
-RangeConstraintManager::assumeSymbolWithinInclusiveRange(
+ProgramStateRef RangeConstraintManager::assumeSymbolWithinInclusiveRange(
ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
const llvm::APSInt &To, const llvm::APSInt &Adjustment) {
RangeSet New = getSymGERange(State, Sym, From, Adjustment);
@@ -664,8 +656,7 @@ RangeConstraintManager::assumeSymbolWithinInclusiveRange(
return New.isEmpty() ? nullptr : State->set<ConstraintRange>(Sym, New);
}
-ProgramStateRef
-RangeConstraintManager::assumeSymbolOutOfInclusiveRange(
+ProgramStateRef RangeConstraintManager::assumeSymbolOutOfInclusiveRange(
ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
const llvm::APSInt &To, const llvm::APSInt &Adjustment) {
RangeSet RangeLT = getSymLTRange(State, Sym, From, Adjustment);
@@ -679,7 +670,7 @@ RangeConstraintManager::assumeSymbolOutOfInclusiveRange(
//===------------------------------------------------------------------------===/
void RangeConstraintManager::print(ProgramStateRef St, raw_ostream &Out,
- const char* nl, const char *sep) {
+ const char *nl, const char *sep) {
ConstraintRangeTy Ranges = St->get<ConstraintRange>();
@@ -689,7 +680,8 @@ void RangeConstraintManager::print(ProgramStateRef St, raw_ostream &Out,
}
Out << nl << sep << "Ranges of symbol values:";
- for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
+ for (ConstraintRangeTy::iterator I = Ranges.begin(), E = Ranges.end(); I != E;
+ ++I) {
Out << nl << ' ' << I.getKey() << " : ";
I.getData().print(Out);
}
diff --git a/lib/StaticAnalyzer/Core/RegionStore.cpp b/lib/StaticAnalyzer/Core/RegionStore.cpp
index 0d173c464481..15ca2c14f944 100644
--- a/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -26,7 +26,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
-#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
@@ -1675,7 +1674,8 @@ RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
// Lazy bindings are usually handled through getExistingLazyBinding().
// We should unify these two code paths at some point.
- if (val.getAs<nonloc::LazyCompoundVal>())
+ if (val.getAs<nonloc::LazyCompoundVal>() ||
+ val.getAs<nonloc::CompoundVal>())
return val;
llvm_unreachable("Unknown default value");
@@ -2073,11 +2073,10 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
if (Init.getAs<nonloc::LazyCompoundVal>())
return bindAggregate(B, R, Init);
- // Remaining case: explicit compound values.
-
if (Init.isUnknown())
- return setImplicitDefaultValue(B, R, ElementTy);
+ return bindAggregate(B, R, UnknownVal());
+ // Remaining case: explicit compound values.
const nonloc::CompoundVal& CV = Init.castAs<nonloc::CompoundVal>();
nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
uint64_t i = 0;
diff --git a/lib/StaticAnalyzer/Core/SValBuilder.cpp b/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 72bcdd9ecb06..10b0858b8488 100644
--- a/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -36,8 +36,11 @@ DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
if (type->isIntegralOrEnumerationType())
return makeIntVal(0, type);
+ if (type->isArrayType() || type->isRecordType() || type->isVectorType() ||
+ type->isAnyComplexType())
+ return makeCompoundVal(type, BasicVals.getEmptySValList());
+
// FIXME: Handle floats.
- // FIXME: Handle structs.
return UnknownVal();
}
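
[Editor's note] An illustrative source sketch for the new aggregate branch (the comments state the intended modeling, not guaranteed analyzer behavior):

    struct P { int x; int y; };

    int test() {
      P p = {};           // record zero value: empty nonloc::CompoundVal
      int a[4] = {};      // array zero value: likewise
      return p.x + a[2];  // both reads can now be resolved to 0
    }
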
@@ -182,11 +185,12 @@ SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
DefinedSVal SValBuilder::getMetadataSymbolVal(const void *symbolTag,
const MemRegion *region,
const Expr *expr, QualType type,
+ const LocationContext *LCtx,
unsigned count) {
assert(SymbolManager::canSymbolicate(type) && "Invalid metadata symbol type");
SymbolRef sym =
- SymMgr.getMetadataSymbol(region, expr, type, count, symbolTag);
+ SymMgr.getMetadataSymbol(region, expr, type, LCtx, count, symbolTag);
if (Loc::isLocType(type))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
@@ -213,6 +217,10 @@ SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
return nonloc::SymbolVal(sym);
}
+DefinedSVal SValBuilder::getMemberPointer(const DeclaratorDecl* DD) {
+ return nonloc::PointerToMember(DD);
+}
+
DefinedSVal SValBuilder::getFunctionPointer(const FunctionDecl *func) {
return loc::MemRegionVal(MemMgr.getFunctionCodeRegion(func));
}
diff --git a/lib/StaticAnalyzer/Core/SVals.cpp b/lib/StaticAnalyzer/Core/SVals.cpp
index a30beed688b7..9f2af3ffa709 100644
--- a/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/lib/StaticAnalyzer/Core/SVals.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/Support/raw_ostream.h"
+#include "clang/AST/DeclCXX.h"
using namespace clang;
using namespace ento;
using llvm::APSInt;
@@ -56,6 +57,10 @@ const FunctionDecl *SVal::getAsFunctionDecl() const {
return FD;
}
+ if (auto X = getAs<nonloc::PointerToMember>()) {
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(X->getDecl()))
+ return MD;
+ }
return nullptr;
}
@@ -155,6 +160,20 @@ const TypedValueRegion *nonloc::LazyCompoundVal::getRegion() const {
return static_cast<const LazyCompoundValData*>(Data)->getRegion();
}
+const DeclaratorDecl *nonloc::PointerToMember::getDecl() const {
+ const auto PTMD = this->getPTMData();
+ if (PTMD.isNull())
+ return nullptr;
+
+ const DeclaratorDecl *DD = nullptr;
+ if (PTMD.is<const DeclaratorDecl *>())
+ DD = PTMD.get<const DeclaratorDecl *>();
+ else
+ DD = PTMD.get<const PointerToMemberData *>()->getDeclaratorDecl();
+
+ return DD;
+}
+
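
[Editor's note] A minimal sketch of the PointerUnion dispatch used by getDecl() above, with illustrative stand-in types rather than the real analyzer classes:

    #include "llvm/ADT/PointerUnion.h"

    struct DeclT { int id; };
    struct PTMDataT { const DeclT *D; /* plus the accumulated base path */ };
    using PTMUnion = llvm::PointerUnion<const DeclT *, const PTMDataT *>;

    const DeclT *getDeclFrom(PTMUnion U) {
      if (U.isNull())
        return nullptr;  // null member pointer: no declaration
      if (U.is<const DeclT *>())
        return U.get<const DeclT *>();
      return U.get<const PTMDataT *>()->D;
    }
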
//===----------------------------------------------------------------------===//
// Other Iterators.
//===----------------------------------------------------------------------===//
@@ -167,6 +186,20 @@ nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
return getValue()->end();
}
+nonloc::PointerToMember::iterator nonloc::PointerToMember::begin() const {
+ const PTMDataType PTMD = getPTMData();
+ if (PTMD.is<const DeclaratorDecl *>())
+ return nonloc::PointerToMember::iterator();
+ return PTMD.get<const PointerToMemberData *>()->begin();
+}
+
+nonloc::PointerToMember::iterator nonloc::PointerToMember::end() const {
+ const PTMDataType PTMD = getPTMData();
+ if (PTMD.is<const DeclaratorDecl *>())
+ return nonloc::PointerToMember::iterator();
+ return PTMD.get<const PointerToMemberData *>()->end();
+}
+
//===----------------------------------------------------------------------===//
// Useful predicates.
//===----------------------------------------------------------------------===//
@@ -299,6 +332,26 @@ void NonLoc::dumpToStream(raw_ostream &os) const {
<< '}';
break;
}
+ case nonloc::PointerToMemberKind: {
+ os << "pointerToMember{";
+ const nonloc::PointerToMember &CastRes =
+ castAs<nonloc::PointerToMember>();
+ if (CastRes.getDecl())
+ os << "|" << CastRes.getDecl()->getQualifiedNameAsString() << "|";
+ bool first = true;
+ for (const auto &I : CastRes) {
+ if (first) {
+ os << ' '; first = false;
+ }
+ else
+ os << ", ";
+
+ os << (*I).getType().getAsString();
+ }
+
+ os << '}';
+ break;
+ }
default:
assert (false && "Pretty-printed not implemented for this NonLoc.");
break;
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index 4051242434ec..0e512ff80861 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -30,22 +30,22 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
switch (SIE->getOpcode()) {
- // We don't reason yet about bitwise-constraints on symbolic values.
- case BO_And:
- case BO_Or:
- case BO_Xor:
- return false;
- // We don't reason yet about these arithmetic constraints on
- // symbolic values.
- case BO_Mul:
- case BO_Div:
- case BO_Rem:
- case BO_Shl:
- case BO_Shr:
- return false;
- // All other cases.
- default:
- return true;
+ // We don't reason yet about bitwise-constraints on symbolic values.
+ case BO_And:
+ case BO_Or:
+ case BO_Xor:
+ return false;
+ // We don't reason yet about these arithmetic constraints on
+ // symbolic values.
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Shl:
+ case BO_Shr:
+ return false;
+ // All other cases.
+ default:
+ return true;
}
}
@@ -65,12 +65,12 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
return true;
}
-ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
- DefinedSVal Cond,
- bool Assumption) {
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
+ DefinedSVal Cond,
+ bool Assumption) {
// If we have a Loc value, cast it to a bool NonLoc first.
if (Optional<Loc> LV = Cond.getAs<Loc>()) {
- SValBuilder &SVB = state->getStateManager().getSValBuilder();
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
QualType T;
const MemRegion *MR = LV->getAsRegion();
if (const TypedRegion *TR = dyn_cast_or_null<TypedRegion>(MR))
@@ -81,19 +81,17 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
Cond = SVB.evalCast(*LV, SVB.getContext().BoolTy, T).castAs<DefinedSVal>();
}
- return assume(state, Cond.castAs<NonLoc>(), Assumption);
+ return assume(State, Cond.castAs<NonLoc>(), Assumption);
}
-ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
- NonLoc cond,
- bool assumption) {
- state = assumeAux(state, cond, assumption);
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
+ NonLoc Cond, bool Assumption) {
+ State = assumeAux(State, Cond, Assumption);
if (NotifyAssumeClients && SU)
- return SU->processAssume(state, cond, assumption);
- return state;
+ return SU->processAssume(State, Cond, Assumption);
+ return State;
}
-
ProgramStateRef
SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
SymbolRef Sym, bool Assumption) {
@@ -111,16 +109,16 @@ SimpleConstraintManager::assumeAuxForSymbol(ProgramStateRef State,
return assumeSymEQ(State, Sym, zero, zero);
}
-ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
- NonLoc Cond,
- bool Assumption) {
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef State,
+ NonLoc Cond,
+ bool Assumption) {
// We cannot reason about SymSymExprs, and can only reason about some
// SymIntExprs.
if (!canReasonAbout(Cond)) {
// Just add the constraint to the expression without trying to simplify.
- SymbolRef sym = Cond.getAsSymExpr();
- return assumeAuxForSymbol(state, sym, Assumption);
+ SymbolRef Sym = Cond.getAsSymExpr();
+ return assumeAuxForSymbol(State, Sym, Assumption);
}
switch (Cond.getSubKind()) {
@@ -129,26 +127,26 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
case nonloc::SymbolValKind: {
nonloc::SymbolVal SV = Cond.castAs<nonloc::SymbolVal>();
- SymbolRef sym = SV.getSymbol();
- assert(sym);
+ SymbolRef Sym = SV.getSymbol();
+ assert(Sym);
// Handle SymbolData.
if (!SV.isExpression()) {
- return assumeAuxForSymbol(state, sym, Assumption);
+ return assumeAuxForSymbol(State, Sym, Assumption);
- // Handle symbolic expression.
- } else if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(sym)) {
+ // Handle symbolic expression.
+ } else if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(Sym)) {
// We can only simplify expressions whose RHS is an integer.
- BinaryOperator::Opcode op = SE->getOpcode();
- if (BinaryOperator::isComparisonOp(op)) {
+ BinaryOperator::Opcode Op = SE->getOpcode();
+ if (BinaryOperator::isComparisonOp(Op)) {
if (!Assumption)
- op = BinaryOperator::negateComparisonOp(op);
+ Op = BinaryOperator::negateComparisonOp(Op);
- return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
+ return assumeSymRel(State, SE->getLHS(), Op, SE->getRHS());
}
- } else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(sym)) {
+ } else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
// Translate "a != b" to "(b - a) != 0".
// We invert the order of the operands as a heuristic for how loop
// conditions are usually written ("begin != end") as compared to length
@@ -163,34 +161,40 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
assert(Loc::isLocType(SSE->getLHS()->getType()));
assert(Loc::isLocType(SSE->getRHS()->getType()));
QualType DiffTy = SymMgr.getContext().getPointerDiffType();
- SymbolRef Subtraction = SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub,
- SSE->getLHS(), DiffTy);
+ SymbolRef Subtraction =
+ SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
Op = BinaryOperator::reverseComparisonOp(Op);
if (!Assumption)
Op = BinaryOperator::negateComparisonOp(Op);
- return assumeSymRel(state, Subtraction, Op, Zero);
+ return assumeSymRel(State, Subtraction, Op, Zero);
}
// If we get here, there's nothing else we can do but treat the symbol as
// opaque.
- return assumeAuxForSymbol(state, sym, Assumption);
+ return assumeAuxForSymbol(State, Sym, Assumption);
}
case nonloc::ConcreteIntKind: {
bool b = Cond.castAs<nonloc::ConcreteInt>().getValue() != 0;
bool isFeasible = b ? Assumption : !Assumption;
- return isFeasible ? state : nullptr;
+ return isFeasible ? State : nullptr;
+ }
+
+ case nonloc::PointerToMemberKind: {
+    bool IsNonNull = !Cond.castAs<nonloc::PointerToMember>().isNullMemberPointer();
+    bool IsFeasible = IsNonNull ? Assumption : !Assumption;
+ return IsFeasible ? State : nullptr;
}
case nonloc::LocAsIntegerKind:
- return assume(state, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
+ return assume(State, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
Assumption);
} // end switch
}
-ProgramStateRef SimpleConstraintManager::assumeWithinInclusiveRange(
+ProgramStateRef SimpleConstraintManager::assumeInclusiveRange(
ProgramStateRef State, NonLoc Value, const llvm::APSInt &From,
const llvm::APSInt &To, bool InRange) {
@@ -207,7 +211,7 @@ ProgramStateRef SimpleConstraintManager::assumeWithinInclusiveRange(
switch (Value.getSubKind()) {
default:
- llvm_unreachable("'assumeWithinInclusiveRange' is not implemented"
+    llvm_unreachable("'assumeInclusiveRange' is not implemented "
"for this NonLoc");
case nonloc::LocAsIntegerKind:
@@ -243,13 +247,26 @@ static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment) {
}
}
-ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
- const SymExpr *LHS,
- BinaryOperator::Opcode op,
- const llvm::APSInt& Int) {
- assert(BinaryOperator::isComparisonOp(op) &&
+ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef State,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode Op,
+ const llvm::APSInt &Int) {
+ assert(BinaryOperator::isComparisonOp(Op) &&
"Non-comparison ops should be rewritten as comparisons to zero.");
+ SymbolRef Sym = LHS;
+
+  // Simplification: translate an assume of a constraint of the form
+  // "(expr1 comparison_op expr2) != 0" to true into an assume of
+  // "expr1 comparison_op expr2" to true. (And similarly, an assume of the
+  // form "(expr1 comparison_op expr2) == 0" to true into an assume of
+  // "expr1 comparison_op expr2" to false.)
+ if (Int == 0 && (Op == BO_EQ || Op == BO_NE)) {
+ if (const BinarySymExpr *SE = dyn_cast<BinarySymExpr>(Sym))
+ if (BinaryOperator::isComparisonOp(SE->getOpcode()))
+        return assume(State, nonloc::SymbolVal(Sym), Op == BO_NE);
+ }
+
// Get the type used for calculating wraparound.
BasicValueFactory &BVF = getBasicVals();
APSIntType WraparoundType = BVF.getAPSIntType(LHS->getType());
@@ -261,7 +278,6 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
// x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
// in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
// the subclasses of SimpleConstraintManager to handle the adjustment.
- SymbolRef Sym = LHS;
llvm::APSInt Adjustment = WraparoundType.getZeroValue();
computeAdjustment(Sym, Adjustment);
@@ -274,36 +290,33 @@ ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
ComparisonType.isUnsigned() && !WraparoundType.isUnsigned())
Adjustment.setIsSigned(false);
- switch (op) {
+ switch (Op) {
default:
llvm_unreachable("invalid operation not caught by assertion above");
case BO_EQ:
- return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymEQ(State, Sym, ConvertedInt, Adjustment);
case BO_NE:
- return assumeSymNE(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymNE(State, Sym, ConvertedInt, Adjustment);
case BO_GT:
- return assumeSymGT(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymGT(State, Sym, ConvertedInt, Adjustment);
case BO_GE:
- return assumeSymGE(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymGE(State, Sym, ConvertedInt, Adjustment);
case BO_LT:
- return assumeSymLT(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymLT(State, Sym, ConvertedInt, Adjustment);
case BO_LE:
- return assumeSymLE(state, Sym, ConvertedInt, Adjustment);
+ return assumeSymLE(State, Sym, ConvertedInt, Adjustment);
} // end switch
}
-ProgramStateRef
-SimpleConstraintManager::assumeSymWithinInclusiveRange(ProgramStateRef State,
- SymbolRef Sym,
- const llvm::APSInt &From,
- const llvm::APSInt &To,
- bool InRange) {
+ProgramStateRef SimpleConstraintManager::assumeSymWithinInclusiveRange(
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InRange) {
// Get the type used for calculating wraparound.
BasicValueFactory &BVF = getBasicVals();
APSIntType WraparoundType = BVF.getAPSIntType(Sym->getType());
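
For illustration, a minimal source-level sketch (not part of the patch) of the
constraint shape the new simplification in assumeSymRel handles:

    // Before this change, a stored comparison tested against zero was treated
    // as an opaque symbol; now assuming "(x < y) != 0" to be true is forwarded
    // as an assumption that "x < y" itself holds.
    void test(int x, int y) {
      int c = (x < y);
      if (c != 0) {
        // The analyzer now records the constraint x < y here.
      }
    }
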
diff --git a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
index b26bc9486110..1128e775b320 100644
--- a/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
+++ b/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -24,30 +24,28 @@ namespace ento {
class SimpleConstraintManager : public ConstraintManager {
SubEngine *SU;
SValBuilder &SVB;
+
public:
- SimpleConstraintManager(SubEngine *subengine, SValBuilder &SB)
- : SU(subengine), SVB(SB) {}
+ SimpleConstraintManager(SubEngine *SE, SValBuilder &SB) : SU(SE), SVB(SB) {}
~SimpleConstraintManager() override;
//===------------------------------------------------------------------===//
// Common implementation for the interface provided by ConstraintManager.
//===------------------------------------------------------------------===//
- ProgramStateRef assume(ProgramStateRef state, DefinedSVal Cond,
- bool Assumption) override;
+ ProgramStateRef assume(ProgramStateRef State, DefinedSVal Cond,
+ bool Assumption) override;
- ProgramStateRef assume(ProgramStateRef state, NonLoc Cond, bool Assumption);
+ ProgramStateRef assume(ProgramStateRef State, NonLoc Cond, bool Assumption);
- ProgramStateRef assumeWithinInclusiveRange(ProgramStateRef State,
- NonLoc Value,
- const llvm::APSInt &From,
- const llvm::APSInt &To,
- bool InRange) override;
+ ProgramStateRef assumeInclusiveRange(ProgramStateRef State, NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To,
+ bool InRange) override;
- ProgramStateRef assumeSymRel(ProgramStateRef state,
- const SymExpr *LHS,
- BinaryOperator::Opcode op,
- const llvm::APSInt& Int);
+ ProgramStateRef assumeSymRel(ProgramStateRef State, const SymExpr *LHS,
+ BinaryOperator::Opcode Op,
+ const llvm::APSInt &Int);
ProgramStateRef assumeSymWithinInclusiveRange(ProgramStateRef State,
SymbolRef Sym,
@@ -55,47 +53,45 @@ public:
const llvm::APSInt &To,
bool InRange);
-
protected:
-
//===------------------------------------------------------------------===//
// Interface that subclasses must implement.
//===------------------------------------------------------------------===//
- // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
+ // Each of these is of the form "$Sym+Adj <> V", where "<>" is the comparison
// operation for the method being invoked.
- virtual ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment) = 0;
+ virtual ProgramStateRef assumeSymNE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) = 0;
- virtual ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment) = 0;
+ virtual ProgramStateRef assumeSymEQ(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) = 0;
- virtual ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment) = 0;
+ virtual ProgramStateRef assumeSymLT(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) = 0;
- virtual ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment) = 0;
+ virtual ProgramStateRef assumeSymGT(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) = 0;
- virtual ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment) = 0;
-
- virtual ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
- const llvm::APSInt& V,
- const llvm::APSInt& Adjustment) = 0;
+ virtual ProgramStateRef assumeSymLE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) = 0;
+ virtual ProgramStateRef assumeSymGE(ProgramStateRef State, SymbolRef Sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) = 0;
virtual ProgramStateRef assumeSymbolWithinInclusiveRange(
ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
virtual ProgramStateRef assumeSymbolOutOfInclusiveRange(
- ProgramStateRef state, SymbolRef Sym, const llvm::APSInt &From,
+ ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
const llvm::APSInt &To, const llvm::APSInt &Adjustment) = 0;
+
//===------------------------------------------------------------------===//
// Internal implementation.
//===------------------------------------------------------------------===//
@@ -105,13 +101,11 @@ protected:
bool canReasonAbout(SVal X) const override;
- ProgramStateRef assumeAux(ProgramStateRef state,
- NonLoc Cond,
- bool Assumption);
+ ProgramStateRef assumeAux(ProgramStateRef State, NonLoc Cond,
+ bool Assumption);
- ProgramStateRef assumeAuxForSymbol(ProgramStateRef State,
- SymbolRef Sym,
- bool Assumption);
+ ProgramStateRef assumeAuxForSymbol(ProgramStateRef State, SymbolRef Sym,
+ bool Assumption);
};
} // end GR namespace
diff --git a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 72b852b2e21d..28b43dd566d5 100644
--- a/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -69,6 +69,9 @@ SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
bool isLocType = Loc::isLocType(castTy);
+ if (val.getAs<nonloc::PointerToMember>())
+ return val;
+
if (Optional<nonloc::LocAsInteger> LI = val.getAs<nonloc::LocAsInteger>()) {
if (isLocType)
return LI->getLoc();
@@ -335,6 +338,21 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
switch (lhs.getSubKind()) {
default:
return makeSymExprValNN(state, op, lhs, rhs, resultTy);
+ case nonloc::PointerToMemberKind: {
+ assert(rhs.getSubKind() == nonloc::PointerToMemberKind &&
+ "Both SVals should have pointer-to-member-type");
+ auto LPTM = lhs.castAs<nonloc::PointerToMember>(),
+ RPTM = rhs.castAs<nonloc::PointerToMember>();
+ auto LPTMD = LPTM.getPTMData(), RPTMD = RPTM.getPTMData();
+ switch (op) {
+ case BO_EQ:
+ return makeTruthVal(LPTMD == RPTMD, resultTy);
+ case BO_NE:
+ return makeTruthVal(LPTMD != RPTMD, resultTy);
+ default:
+ return UnknownVal();
+ }
+ }
case nonloc::LocAsIntegerKind: {
Loc lhsL = lhs.castAs<nonloc::LocAsInteger>().getLoc();
switch (rhs.getSubKind()) {
@@ -753,6 +771,12 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// Note, heap base symbolic regions are assumed to not alias with
// each other; for example, we assume that malloc returns different address
// on each invocation.
+ // FIXME: ObjC object pointers always reside on the heap, but currently
+ // we treat their memory space as unknown, because symbolic pointers
+ // to ObjC objects may alias. There should be a way to construct
+ // possibly-aliasing heap-based regions. For instance, MacOSXApiChecker
+ // guesses memory space for ObjC object pointers manually instead of
+ // relying on us.
if (LeftBase != RightBase &&
((!isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) ||
(isa<HeapSpaceRegion>(LeftMS) || isa<HeapSpaceRegion>(RightMS))) ){
@@ -857,6 +881,23 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy) {
+ if (op >= BO_PtrMemD && op <= BO_PtrMemI) {
+ if (auto PTMSV = rhs.getAs<nonloc::PointerToMember>()) {
+ if (PTMSV->isNullMemberPointer())
+ return UndefinedVal();
+ if (const FieldDecl *FD = PTMSV->getDeclAs<FieldDecl>()) {
+ SVal Result = lhs;
+
+ for (const auto &I : *PTMSV)
+ Result = StateMgr.getStoreManager().evalDerivedToBase(
+ Result, I->getType(), I->isVirtual());
+ return state->getLValue(FD, Result);
+ }
+ }
+
+ return rhs;
+ }
+
assert(!BinaryOperator::isComparisonOp(op) &&
"arguments to comparison ops must be of the same type");
diff --git a/lib/StaticAnalyzer/Core/Store.cpp b/lib/StaticAnalyzer/Core/Store.cpp
index de29f0eedd12..aca6e3b6255b 100644
--- a/lib/StaticAnalyzer/Core/Store.cpp
+++ b/lib/StaticAnalyzer/Core/Store.cpp
@@ -292,7 +292,7 @@ static const CXXRecordDecl *getCXXRecordType(const MemRegion *MR) {
return nullptr;
}
-SVal StoreManager::evalDynamicCast(SVal Base, QualType TargetType,
+SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
bool &Failed) {
Failed = false;
diff --git a/lib/StaticAnalyzer/Core/SymbolManager.cpp b/lib/StaticAnalyzer/Core/SymbolManager.cpp
index b8b4af1179e5..4be85661b645 100644
--- a/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -85,7 +85,8 @@ void SymbolMetadata::dumpToStream(raw_ostream &os) const {
void SymbolData::anchor() { }
void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
- os << "reg_$" << getSymbolID() << "<" << R << ">";
+ os << "reg_$" << getSymbolID()
+ << '<' << getType().getAsString() << ' ' << R << '>';
}
bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
@@ -216,17 +217,18 @@ SymbolManager::getExtentSymbol(const SubRegion *R) {
return cast<SymbolExtent>(SD);
}
-const SymbolMetadata*
+const SymbolMetadata *
SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
+ const LocationContext *LCtx,
unsigned Count, const void *SymbolTag) {
llvm::FoldingSetNodeID profile;
- SymbolMetadata::Profile(profile, R, S, T, Count, SymbolTag);
+ SymbolMetadata::Profile(profile, R, S, T, LCtx, Count, SymbolTag);
void *InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
- new (SD) SymbolMetadata(SymbolCounter, R, S, T, Count, SymbolTag);
+ new (SD) SymbolMetadata(SymbolCounter, R, S, T, LCtx, Count, SymbolTag);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
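
Caller-side impact, as a hedged sketch (the checker context 'C', region 'R',
statement 'S', and type 'T' are assumed names): metadata symbols are now keyed
by the LocationContext as well, so creation sites gain one argument:

    const SymbolMetadata *M = C.getSymbolManager().getMetadataSymbol(
        R, S, T, C.getLocationContext(), C.blockCount(), /*SymbolTag=*/this);
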
diff --git a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index 8ac229fc6583..b3e287ebf815 100644
--- a/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -13,17 +13,14 @@
#include "clang/StaticAnalyzer/Frontend/AnalysisConsumer.h"
#include "ModelInjector.h"
-#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
-#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CallGraph.h"
#include "clang/Analysis/CodeInjector.h"
-#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
@@ -36,9 +33,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
-#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -118,16 +113,28 @@ public:
Diag.Report(WarnLoc, WarnID) << PD->getShortDescription()
<< PD->path.back()->getRanges();
+ // First, add extra notes, even if paths should not be included.
+ for (const auto &Piece : PD->path) {
+ if (!isa<PathDiagnosticNotePiece>(Piece.get()))
+ continue;
+
+ SourceLocation NoteLoc = Piece->getLocation().asLocation();
+ Diag.Report(NoteLoc, NoteID) << Piece->getString()
+ << Piece->getRanges();
+ }
+
if (!IncludePath)
continue;
+ // Then, add the path notes if necessary.
PathPieces FlatPath = PD->path.flatten(/*ShouldFlattenMacros=*/true);
- for (PathPieces::const_iterator PI = FlatPath.begin(),
- PE = FlatPath.end();
- PI != PE; ++PI) {
- SourceLocation NoteLoc = (*PI)->getLocation().asLocation();
- Diag.Report(NoteLoc, NoteID) << (*PI)->getString()
- << (*PI)->getRanges();
+ for (const auto &Piece : FlatPath) {
+ if (isa<PathDiagnosticNotePiece>(Piece.get()))
+ continue;
+
+ SourceLocation NoteLoc = Piece->getLocation().asLocation();
+ Diag.Report(NoteLoc, NoteID) << Piece->getString()
+ << Piece->getRanges();
}
}
}
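
Illustrative text output after this change (sketch, not captured from a real
run): note pieces are now emitted as plain "note:" diagnostics even when path
notes are suppressed, while ordinary path events remain gated on IncludePath:

    t.c:7:10: warning: Division by zero
    t.c:4:3: note: Assuming 'x' is 0
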
@@ -193,14 +200,16 @@ public:
Injector(injector) {
DigestAnalyzerOptions();
if (Opts->PrintStats) {
- llvm::EnableStatistics();
- TUTotalTimer = new llvm::Timer("Analyzer Total Time");
+ llvm::EnableStatistics(false);
+ TUTotalTimer = new llvm::Timer("time", "Analyzer Total Time");
}
}
~AnalysisConsumer() override {
- if (Opts->PrintStats)
+ if (Opts->PrintStats) {
delete TUTotalTimer;
+ llvm::PrintStatistics();
+ }
}
void DigestAnalyzerOptions() {
@@ -270,19 +279,8 @@ public:
else
assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
- llvm::errs() << ": " << Loc.getFilename();
- if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
- const NamedDecl *ND = cast<NamedDecl>(D);
- llvm::errs() << ' ' << ND->getQualifiedNameAsString() << '\n';
- }
- else if (isa<BlockDecl>(D)) {
- llvm::errs() << ' ' << "block(line:" << Loc.getLine() << ",col:"
- << Loc.getColumn() << '\n';
- }
- else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- Selector S = MD->getSelector();
- llvm::errs() << ' ' << S.getAsString();
- }
+ llvm::errs() << ": " << Loc.getFilename() << ' '
+ << getFunctionName(D) << '\n';
}
}
@@ -382,6 +380,7 @@ public:
private:
void storeTopLevelDecls(DeclGroupRef DG);
+ std::string getFunctionName(const Decl *D);
/// \brief Check if we should skip (not analyze) the given function.
AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
@@ -431,6 +430,13 @@ static bool shouldSkipFunction(const Decl *D,
// Count naming convention errors more aggressively.
if (isa<ObjCMethodDecl>(D))
return false;
+ // We also want to reanalyze all C++ copy and move assignment operators to
+ // separately check the two cases where 'this' aliases with the parameter and
+ // where it may not. (cplusplus.SelfAssignmentChecker)
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator())
+ return false;
+ }
// Otherwise, if we visited the function before, do not reanalyze it.
return Visited.count(D);
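
A minimal sketch of the bug class this reanalysis enables
cplusplus.SelfAssignmentChecker to find (hypothetical type):

    struct Buf {
      int *Data;
      Buf &operator=(const Buf &Other) {
        delete[] Data;                   // if this == &Other, Other.Data is gone
        Data = new int[1]{*Other.Data};  // use-after-free on self-assignment
        return *this;
      }
    };
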
@@ -442,9 +448,7 @@ AnalysisConsumer::getInliningModeForFunction(const Decl *D,
// We want to reanalyze all ObjC methods as top level to report Retain
// Count naming convention errors more aggressively. But we should tune down
// inlining when reanalyzing an already inlined function.
- if (Visited.count(D)) {
- assert(isa<ObjCMethodDecl>(D) &&
- "We are only reanalyzing ObjCMethods.");
+ if (Visited.count(D) && isa<ObjCMethodDecl>(D)) {
const ObjCMethodDecl *ObjCM = cast<ObjCMethodDecl>(D);
if (ObjCM->getMethodFamily() != OMF_init)
return ExprEngine::Inline_Minimal;
@@ -568,16 +572,64 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
}
-static std::string getFunctionName(const Decl *D) {
- if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
- return ID->getSelector().getAsString();
- }
- if (const FunctionDecl *ND = dyn_cast<FunctionDecl>(D)) {
- IdentifierInfo *II = ND->getIdentifier();
- if (II)
- return II->getName();
+std::string AnalysisConsumer::getFunctionName(const Decl *D) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ OS << FD->getQualifiedNameAsString();
+
+ // In C++, there are overloads.
+ if (Ctx->getLangOpts().CPlusPlus) {
+ OS << '(';
+ for (const auto &P : FD->parameters()) {
+ if (P != *FD->param_begin())
+ OS << ", ";
+ OS << P->getType().getAsString();
+ }
+ OS << ')';
+ }
+
+ } else if (isa<BlockDecl>(D)) {
+ PresumedLoc Loc = Ctx->getSourceManager().getPresumedLoc(D->getLocation());
+
+ if (Loc.isValid()) {
+ OS << "block (line: " << Loc.getLine() << ", col: " << Loc.getColumn()
+ << ')';
+ }
+
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+
+ // FIXME: copy-pasted from CGDebugInfo.cpp.
+ OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
+ const DeclContext *DC = OMD->getDeclContext();
+ if (const auto *OID = dyn_cast<ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const auto *OID = dyn_cast<ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(DC)) {
+ if (OC->IsClassExtension()) {
+ OS << OC->getClassInterface()->getName();
+ } else {
+ OS << OC->getClassInterface()->getName() << '('
+ << OC->getName() << ')';
+ }
+ } else if (const auto *OCD = dyn_cast<ObjCCategoryImplDecl>(DC)) {
+ OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '('
+ << OCD->getIdentifier()->getNameStart() << ')';
+ } else if (isa<ObjCProtocolDecl>(DC)) {
+ // We can extract the type of the class from the self pointer.
+ if (ImplicitParamDecl *SelfDecl = OMD->getSelfDecl()) {
+ QualType ClassTy =
+ cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType();
+ ClassTy.print(OS, PrintingPolicy(LangOptions()));
+ }
+ }
+ OS << ' ' << OMD->getSelector().getAsString() << ']';
+
}
- return "";
+
+ return OS.str();
}
AnalysisConsumer::AnalysisMode
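
With this helper, -analyzer-display-progress output looks roughly like the
following (illustrative file and symbol names):

    ANALYZE (Path,  Inline_Regular):  t.cpp ns::f(int, float)
    ANALYZE (Syntax):                 t.m   -[MyClass(Extra) doWork:]
    ANALYZE (Syntax):                 t.cpp block (line: 12, col: 3)
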
@@ -614,6 +666,12 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
if (Mode == AM_None)
return;
+ // Clear the AnalysisManager of old AnalysisDeclContexts.
+ Mgr->ClearContexts();
+ // Ignore autosynthesized code.
+ if (Mgr->getAnalysisDeclContext(D)->isBodyAutosynthesized())
+ return;
+
DisplayFunction(D, Mode, IMode);
CFG *DeclCFG = Mgr->getCFG(D);
if (DeclCFG) {
@@ -621,8 +679,6 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
MaxCFGSize = MaxCFGSize < CFGSize ? CFGSize : MaxCFGSize;
}
- // Clear the AnalysisManager of old AnalysisDeclContexts.
- Mgr->ClearContexts();
BugReporter BR(*Mgr);
if (Mode & AM_Syntax)
diff --git a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
index 75fa4c651ace..31b6638e651f 100644
--- a/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
+++ b/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -101,6 +101,16 @@ void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
<< pluginAPIVersion;
}
+static SmallVector<CheckerOptInfo, 8>
+getCheckerOptList(const AnalyzerOptions &opts) {
+ SmallVector<CheckerOptInfo, 8> checkerOpts;
+ for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
+ const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
+ checkerOpts.push_back(CheckerOptInfo(opt.first, opt.second));
+ }
+ return checkerOpts;
+}
+
std::unique_ptr<CheckerManager>
ento::createCheckerManager(AnalyzerOptions &opts, const LangOptions &langOpts,
ArrayRef<std::string> plugins,
@@ -108,11 +118,7 @@ ento::createCheckerManager(AnalyzerOptions &opts, const LangOptions &langOpts,
std::unique_ptr<CheckerManager> checkerMgr(
new CheckerManager(langOpts, &opts));
- SmallVector<CheckerOptInfo, 8> checkerOpts;
- for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
- const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
- checkerOpts.push_back(CheckerOptInfo(opt.first.c_str(), opt.second));
- }
+ SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
ClangCheckerRegistry allCheckers(plugins, &diags);
allCheckers.initializeManager(*checkerMgr, checkerOpts);
@@ -137,3 +143,12 @@ void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
ClangCheckerRegistry(plugins).printHelp(out);
}
+
+void ento::printEnabledCheckerList(raw_ostream &out,
+ ArrayRef<std::string> plugins,
+ const AnalyzerOptions &opts) {
+ out << "OVERVIEW: Clang Static Analyzer Enabled Checkers List\n\n";
+
+ SmallVector<CheckerOptInfo, 8> checkerOpts = getCheckerOptList(opts);
+ ClangCheckerRegistry(plugins).printList(out, checkerOpts);
+}
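
Assuming the cc1 flag wired to this new entry point is
-analyzer-list-enabled-checkers, usage would look like:

    clang -cc1 -analyzer-list-enabled-checkers t.c
    OVERVIEW: Clang Static Analyzer Enabled Checkers List
    ...
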
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
index ee2c3f513cdf..0a284851b08d 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.cpp
@@ -19,7 +19,6 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CrashRecoveryContext.h"
#include "llvm/Support/FileSystem.h"
-#include <string>
#include <utility>
using namespace clang;
diff --git a/lib/StaticAnalyzer/Frontend/ModelInjector.h b/lib/StaticAnalyzer/Frontend/ModelInjector.h
index e23bf8abf384..98a5f69d68e8 100644
--- a/lib/StaticAnalyzer/Frontend/ModelInjector.h
+++ b/lib/StaticAnalyzer/Frontend/ModelInjector.h
@@ -25,11 +25,7 @@
#define LLVM_CLANG_SA_FRONTEND_MODELINJECTOR_H
#include "clang/Analysis/CodeInjector.h"
-#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringMap.h"
-#include <map>
-#include <memory>
-#include <vector>
namespace clang {
diff --git a/lib/Tooling/ArgumentsAdjusters.cpp b/lib/Tooling/ArgumentsAdjusters.cpp
index 2f3d829d7d19..48b925c698a7 100644
--- a/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/lib/Tooling/ArgumentsAdjusters.cpp
@@ -17,7 +17,7 @@
namespace clang {
namespace tooling {
-/// Add -fsyntax-only option to the commnand line arguments.
+/// Add -fsyntax-only option to the command line arguments.
ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
CommandLineArguments AdjustedArgs;
diff --git a/lib/Tooling/CMakeLists.txt b/lib/Tooling/CMakeLists.txt
index 56134c1164d4..2eec1dba2f36 100644
--- a/lib/Tooling/CMakeLists.txt
+++ b/lib/Tooling/CMakeLists.txt
@@ -1,4 +1,7 @@
-set(LLVM_LINK_COMPONENTS support)
+set(LLVM_LINK_COMPONENTS
+ Option
+ Support
+ )
add_subdirectory(Core)
@@ -13,6 +16,9 @@ add_clang_library(clangTooling
RefactoringCallbacks.cpp
Tooling.cpp
+ DEPENDS
+ ClangDriverOptions
+
LINK_LIBS
clangAST
clangASTMatchers
diff --git a/lib/Tooling/CompilationDatabase.cpp b/lib/Tooling/CompilationDatabase.cpp
index 8fc4a1fe5beb..8ca0b2df7013 100644
--- a/lib/Tooling/CompilationDatabase.cpp
+++ b/lib/Tooling/CompilationDatabase.cpp
@@ -32,12 +32,14 @@
using namespace clang;
using namespace tooling;
+LLVM_INSTANTIATE_REGISTRY(CompilationDatabasePluginRegistry)
+
CompilationDatabase::~CompilationDatabase() {}
std::unique_ptr<CompilationDatabase>
CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
std::string &ErrorMessage) {
- std::stringstream ErrorStream;
+ llvm::raw_string_ostream ErrorStream(ErrorMessage);
for (CompilationDatabasePluginRegistry::iterator
It = CompilationDatabasePluginRegistry::begin(),
Ie = CompilationDatabasePluginRegistry::end();
@@ -49,7 +51,6 @@ CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
return DB;
ErrorStream << It->getName() << ": " << DatabaseErrorMessage << "\n";
}
- ErrorMessage = ErrorStream.str();
return nullptr;
}
@@ -299,7 +300,8 @@ FixedCompilationDatabase(Twine Directory, ArrayRef<std::string> CommandLine) {
ToolCommandLine.insert(ToolCommandLine.end(),
CommandLine.begin(), CommandLine.end());
CompileCommands.emplace_back(Directory, StringRef(),
- std::move(ToolCommandLine));
+ std::move(ToolCommandLine),
+ StringRef());
}
std::vector<CompileCommand>
diff --git a/lib/Tooling/Core/Lookup.cpp b/lib/Tooling/Core/Lookup.cpp
index 697eeb46ce41..6edf61b8050d 100644
--- a/lib/Tooling/Core/Lookup.cpp
+++ b/lib/Tooling/Core/Lookup.cpp
@@ -13,37 +13,69 @@
#include "clang/Tooling/Core/Lookup.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
using namespace clang;
using namespace clang::tooling;
-static bool isInsideDifferentNamespaceWithSameName(const DeclContext *DeclA,
- const DeclContext *DeclB) {
- while (true) {
- // Look past non-namespaces on DeclA.
- while (DeclA && !isa<NamespaceDecl>(DeclA))
- DeclA = DeclA->getParent();
-
- // Look past non-namespaces on DeclB.
- while (DeclB && !isa<NamespaceDecl>(DeclB))
- DeclB = DeclB->getParent();
-
- // We hit the root, no namespace collision.
- if (!DeclA || !DeclB)
- return false;
+// Gets all namespaces that \p Context is in as a vector (ignoring anonymous
+// namespaces). The inner namespaces come before outer namespaces in the vector.
+// For example, if the context is in the following namespace:
+// `namespace a { namespace b { namespace c { ... } } }`,
+// the vector will be `{c, b, a}`.
+static llvm::SmallVector<const NamespaceDecl *, 4>
+getAllNamedNamespaces(const DeclContext *Context) {
+ llvm::SmallVector<const NamespaceDecl *, 4> Namespaces;
+ auto GetNextNamedNamespace = [](const DeclContext *Context) {
+ // Look past non-namespaces and anonymous namespaces on Context.
+ while (Context && (!isa<NamespaceDecl>(Context) ||
+ cast<NamespaceDecl>(Context)->isAnonymousNamespace()))
+ Context = Context->getParent();
+ return Context;
+ };
+ for (Context = GetNextNamedNamespace(Context); Context != nullptr;
+ Context = GetNextNamedNamespace(Context->getParent()))
+ Namespaces.push_back(cast<NamespaceDecl>(Context));
+ return Namespaces;
+}
+// Returns true if the context in which the type is used and the context in
+// which the type is declared are the same semantical namespace but different
+// lexical namespaces.
+static bool
+usingFromDifferentCanonicalNamespace(const DeclContext *FromContext,
+ const DeclContext *UseContext) {
+ // We can skip anonymous namespaces because:
+ // 1. `FromContext` and `UseContext` must be in the same anonymous namespace
+ // since referencing across anonymous namespaces is not possible.
+ // 2. If `FromContext` and `UseContext` are in the same anonymous namespace,
+ // the function will still return `false` as expected.
+ llvm::SmallVector<const NamespaceDecl *, 4> FromNamespaces =
+ getAllNamedNamespaces(FromContext);
+ llvm::SmallVector<const NamespaceDecl *, 4> UseNamespaces =
+ getAllNamedNamespaces(UseContext);
+ // If `UseContext` has fewer levels of nested namespaces, it cannot be in the
+ // same canonical namespace as `FromContext`.
+ if (UseNamespaces.size() < FromNamespaces.size())
+ return false;
+ unsigned Diff = UseNamespaces.size() - FromNamespaces.size();
+ auto FromIter = FromNamespaces.begin();
+ // Only compare `FromNamespaces` with namespaces in `UseNamespaces` that can
+ // collide, i.e. the top N namespaces where N is the number of namespaces in
+ // `FromNamespaces`.
+ auto UseIter = UseNamespaces.begin() + Diff;
+ for (; FromIter != FromNamespaces.end() && UseIter != UseNamespaces.end();
+ ++FromIter, ++UseIter) {
// Literally the same namespace, not a collision.
- if (DeclA == DeclB)
+ if (*FromIter == *UseIter)
return false;
-
- // Now check the names. If they match we have a different namespace with the
- // same name.
- if (cast<NamespaceDecl>(DeclA)->getDeclName() ==
- cast<NamespaceDecl>(DeclB)->getDeclName())
+ // Now check the names. If they match we have a different canonical
+ // namespace with the same name.
+ if (cast<NamespaceDecl>(*FromIter)->getDeclName() ==
+ cast<NamespaceDecl>(*UseIter)->getDeclName())
return true;
-
- DeclA = DeclA->getParent();
- DeclB = DeclB->getParent();
}
+ assert(FromIter == FromNamespaces.end() && UseIter == UseNamespaces.end());
+ return false;
}
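
A compact example of the collision this function detects (sketch):

    namespace a { namespace b { class X {}; } }   // FromContext: ::a::b
    namespace x { namespace a { namespace b {
      // UseContext: ::x::a::b. A bare "a::b::X" here resolves against
      // x::a::b rather than ::a::b, so a raw name replacement is unsafe.
    } } }
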
static StringRef getBestNamespaceSubstr(const DeclContext *DeclA,
@@ -90,16 +122,22 @@ std::string tooling::replaceNestedName(const NestedNameSpecifier *Use,
"Expected fully-qualified name!");
// We can do a raw name replacement when we are not inside the namespace for
- // the original function and it is not in the global namespace. The
+ // the original class/function and it is not in the global namespace. The
// assumption is that outside the original namespace we must have a using
// statement that makes this work out and that other parts of this refactor
- // will automatically fix using statements to point to the new function
+ // will automatically fix using statements to point to the new class/function.
+ // However, if `FromDecl` is a class forward declaration, the reference is
+ // still considered to refer to the original definition, so we can't do a
+ // raw name replacement in this case.
const bool class_name_only = !Use;
const bool in_global_namespace =
isa<TranslationUnitDecl>(FromDecl->getDeclContext());
- if (class_name_only && !in_global_namespace &&
- !isInsideDifferentNamespaceWithSameName(FromDecl->getDeclContext(),
- UseContext)) {
+ const bool is_class_forward_decl =
+ isa<CXXRecordDecl>(FromDecl) &&
+ !cast<CXXRecordDecl>(FromDecl)->isCompleteDefinition();
+ if (class_name_only && !in_global_namespace && !is_class_forward_decl &&
+ !usingFromDifferentCanonicalNamespace(FromDecl->getDeclContext(),
+ UseContext)) {
auto Pos = ReplacementString.rfind("::");
return Pos != StringRef::npos ? ReplacementString.substr(Pos + 2)
: ReplacementString;
diff --git a/lib/Tooling/Core/QualTypeNames.cpp b/lib/Tooling/Core/QualTypeNames.cpp
index 619dae1ee106..721c2c92fc27 100644
--- a/lib/Tooling/Core/QualTypeNames.cpp
+++ b/lib/Tooling/Core/QualTypeNames.cpp
@@ -14,8 +14,6 @@
#include "clang/AST/DeclarationName.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Mangle.h"
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/StringRef.h"
#include <stdio.h>
#include <memory>
diff --git a/lib/Tooling/Core/Replacement.cpp b/lib/Tooling/Core/Replacement.cpp
index 4f130709ac16..e194b59a6e2b 100644
--- a/lib/Tooling/Core/Replacement.cpp
+++ b/lib/Tooling/Core/Replacement.cpp
@@ -20,6 +20,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/Rewriter.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_os_ostream.h"
@@ -29,8 +30,7 @@ namespace tooling {
static const char * const InvalidLocation = "";
-Replacement::Replacement()
- : FilePath(InvalidLocation) {}
+Replacement::Replacement() : FilePath(InvalidLocation) {}
Replacement::Replacement(StringRef FilePath, unsigned Offset, unsigned Length,
StringRef ReplacementText)
@@ -84,11 +84,8 @@ bool operator<(const Replacement &LHS, const Replacement &RHS) {
if (LHS.getOffset() != RHS.getOffset())
return LHS.getOffset() < RHS.getOffset();
- // Apply longer replacements first, specifically so that deletions are
- // executed before insertions. It is (hopefully) never the intention to
- // delete parts of newly inserted code.
if (LHS.getLength() != RHS.getLength())
- return LHS.getLength() > RHS.getLength();
+ return LHS.getLength() < RHS.getLength();
if (LHS.getFilePath() != RHS.getFilePath())
return LHS.getFilePath() < RHS.getFilePath();
@@ -138,200 +135,196 @@ void Replacement::setFromSourceRange(const SourceManager &Sources,
ReplacementText);
}
-template <typename T>
-unsigned shiftedCodePositionInternal(const T &Replaces, unsigned Position) {
- unsigned Offset = 0;
- for (const auto& R : Replaces) {
- if (R.getOffset() + R.getLength() <= Position) {
- Offset += R.getReplacementText().size() - R.getLength();
- continue;
- }
- if (R.getOffset() < Position &&
- R.getOffset() + R.getReplacementText().size() <= Position) {
- Position = R.getOffset() + R.getReplacementText().size() - 1;
- }
- break;
- }
- return Position + Offset;
+Replacement
+Replacements::getReplacementInChangedCode(const Replacement &R) const {
+ unsigned NewStart = getShiftedCodePosition(R.getOffset());
+ unsigned NewEnd = getShiftedCodePosition(R.getOffset() + R.getLength());
+ return Replacement(R.getFilePath(), NewStart, NewEnd - NewStart,
+ R.getReplacementText());
}
-unsigned shiftedCodePosition(const Replacements &Replaces, unsigned Position) {
- return shiftedCodePositionInternal(Replaces, Position);
+static std::string getReplacementErrString(replacement_error Err) {
+ switch (Err) {
+ case replacement_error::fail_to_apply:
+ return "Failed to apply a replacement.";
+ case replacement_error::wrong_file_path:
+ return "The new replacement's file path is different from the file path of "
+ "existing replacements";
+ case replacement_error::overlap_conflict:
+ return "The new replacement overlaps with an existing replacement.";
+ case replacement_error::insert_conflict:
+ return "The new insertion has the same insert location as an existing "
+ "replacement.";
+ }
+ llvm_unreachable("A value of replacement_error has no message.");
}
-// FIXME: Remove this function when Replacements is implemented as std::vector
-// instead of std::set.
-unsigned shiftedCodePosition(const std::vector<Replacement> &Replaces,
- unsigned Position) {
- return shiftedCodePositionInternal(Replaces, Position);
+std::string ReplacementError::message() const {
+ std::string Message = getReplacementErrString(Err);
+ if (NewReplacement.hasValue())
+ Message += "\nNew replacement: " + NewReplacement->toString();
+ if (ExistingReplacement.hasValue())
+ Message += "\nExisting replacement: " + ExistingReplacement->toString();
+ return Message;
}
-void deduplicate(std::vector<Replacement> &Replaces,
- std::vector<Range> &Conflicts) {
- if (Replaces.empty())
- return;
-
- auto LessNoPath = [](const Replacement &LHS, const Replacement &RHS) {
- if (LHS.getOffset() != RHS.getOffset())
- return LHS.getOffset() < RHS.getOffset();
- if (LHS.getLength() != RHS.getLength())
- return LHS.getLength() < RHS.getLength();
- return LHS.getReplacementText() < RHS.getReplacementText();
- };
-
- auto EqualNoPath = [](const Replacement &LHS, const Replacement &RHS) {
- return LHS.getOffset() == RHS.getOffset() &&
- LHS.getLength() == RHS.getLength() &&
- LHS.getReplacementText() == RHS.getReplacementText();
- };
+char ReplacementError::ID = 0;
- // Deduplicate. We don't want to deduplicate based on the path as we assume
- // that all replacements refer to the same file (or are symlinks).
- std::sort(Replaces.begin(), Replaces.end(), LessNoPath);
- Replaces.erase(std::unique(Replaces.begin(), Replaces.end(), EqualNoPath),
- Replaces.end());
-
- // Detect conflicts
- Range ConflictRange(Replaces.front().getOffset(),
- Replaces.front().getLength());
- unsigned ConflictStart = 0;
- unsigned ConflictLength = 1;
- for (unsigned i = 1; i < Replaces.size(); ++i) {
- Range Current(Replaces[i].getOffset(), Replaces[i].getLength());
- if (ConflictRange.overlapsWith(Current)) {
- // Extend conflicted range
- ConflictRange = Range(ConflictRange.getOffset(),
- std::max(ConflictRange.getLength(),
- Current.getOffset() + Current.getLength() -
- ConflictRange.getOffset()));
- ++ConflictLength;
- } else {
- if (ConflictLength > 1)
- Conflicts.push_back(Range(ConflictStart, ConflictLength));
- ConflictRange = Current;
- ConflictStart = i;
- ConflictLength = 1;
+Replacements Replacements::getCanonicalReplacements() const {
+ std::vector<Replacement> NewReplaces;
+ // Merge adjacent replacements.
+ for (const auto &R : Replaces) {
+ if (NewReplaces.empty()) {
+ NewReplaces.push_back(R);
+ continue;
}
- }
-
- if (ConflictLength > 1)
- Conflicts.push_back(Range(ConflictStart, ConflictLength));
-}
-
-bool applyAllReplacements(const Replacements &Replaces, Rewriter &Rewrite) {
- bool Result = true;
- for (Replacements::const_iterator I = Replaces.begin(),
- E = Replaces.end();
- I != E; ++I) {
- if (I->isApplicable()) {
- Result = I->apply(Rewrite) && Result;
+ auto &Prev = NewReplaces.back();
+ unsigned PrevEnd = Prev.getOffset() + Prev.getLength();
+ if (PrevEnd < R.getOffset()) {
+ NewReplaces.push_back(R);
} else {
- Result = false;
+ assert(PrevEnd == R.getOffset() &&
+ "Existing replacements must not overlap.");
+ Replacement NewR(
+ R.getFilePath(), Prev.getOffset(), Prev.getLength() + R.getLength(),
+ (Prev.getReplacementText() + R.getReplacementText()).str());
+ Prev = NewR;
}
}
- return Result;
+ ReplacementsImpl NewReplacesImpl(NewReplaces.begin(), NewReplaces.end());
+ return Replacements(NewReplacesImpl.begin(), NewReplacesImpl.end());
}
-// FIXME: Remove this function when Replacements is implemented as std::vector
-// instead of std::set.
-bool applyAllReplacements(const std::vector<Replacement> &Replaces,
- Rewriter &Rewrite) {
- bool Result = true;
- for (std::vector<Replacement>::const_iterator I = Replaces.begin(),
- E = Replaces.end();
- I != E; ++I) {
- if (I->isApplicable()) {
- Result = I->apply(Rewrite) && Result;
- } else {
- Result = false;
- }
- }
- return Result;
+// `R` and `Replaces` are order-independent if applying them in either order
+// has the same effect, so we compare the merged results of applying them in
+// each of the two orders.
+llvm::Expected<Replacements>
+Replacements::mergeIfOrderIndependent(const Replacement &R) const {
+ Replacements Rs(R);
+ // A Replacements set containing a single replacement that is `R` referring to
+ // the code after the existing replacements `Replaces` are applied.
+ Replacements RsShiftedByReplaces(getReplacementInChangedCode(R));
+ // A Replacements set that is `Replaces` referring to the code after `R` is
+ // applied.
+ Replacements ReplacesShiftedByRs;
+ for (const auto &Replace : Replaces)
+ ReplacesShiftedByRs.Replaces.insert(
+ Rs.getReplacementInChangedCode(Replace));
+ // This is equivalent to applying `Replaces` first and then `R`.
+ auto MergeShiftedRs = merge(RsShiftedByReplaces);
+ // This is equivalent to applying `R` first and then `Replaces`.
+ auto MergeShiftedReplaces = Rs.merge(ReplacesShiftedByRs);
+
+ // Since empty or segmented replacements around existing replacements might be
+ // produced above, we need to compare replacements in canonical forms.
+ if (MergeShiftedRs.getCanonicalReplacements() ==
+ MergeShiftedReplaces.getCanonicalReplacements())
+ return MergeShiftedRs;
+ return llvm::make_error<ReplacementError>(replacement_error::overlap_conflict,
+ R, *Replaces.begin());
}
-llvm::Expected<std::string> applyAllReplacements(StringRef Code,
- const Replacements &Replaces) {
- if (Replaces.empty())
- return Code.str();
+llvm::Error Replacements::add(const Replacement &R) {
+ // Check the file path.
+ if (!Replaces.empty() && R.getFilePath() != Replaces.begin()->getFilePath())
+ return llvm::make_error<ReplacementError>(
+ replacement_error::wrong_file_path, R, *Replaces.begin());
- IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
- new vfs::InMemoryFileSystem);
- FileManager Files(FileSystemOptions(), InMemoryFileSystem);
- DiagnosticsEngine Diagnostics(
- IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
- new DiagnosticOptions);
- SourceManager SourceMgr(Diagnostics, Files);
- Rewriter Rewrite(SourceMgr, LangOptions());
- InMemoryFileSystem->addFile(
- "<stdin>", 0, llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>"));
- FileID ID = SourceMgr.createFileID(Files.getFile("<stdin>"), SourceLocation(),
- clang::SrcMgr::C_User);
- for (Replacements::const_iterator I = Replaces.begin(), E = Replaces.end();
- I != E; ++I) {
- Replacement Replace("<stdin>", I->getOffset(), I->getLength(),
- I->getReplacementText());
- if (!Replace.apply(Rewrite))
- return llvm::make_error<llvm::StringError>(
- "Failed to apply replacement: " + Replace.toString(),
- llvm::inconvertibleErrorCode());
+ // Special-case header insertions.
+ if (R.getOffset() == UINT_MAX) {
+ Replaces.insert(R);
+ return llvm::Error::success();
}
- std::string Result;
- llvm::raw_string_ostream OS(Result);
- Rewrite.getEditBuffer(ID).write(OS);
- OS.flush();
- return Result;
-}
-// Merge and sort overlapping ranges in \p Ranges.
-static std::vector<Range> mergeAndSortRanges(std::vector<Range> Ranges) {
- std::sort(Ranges.begin(), Ranges.end(),
- [](const Range &LHS, const Range &RHS) {
- if (LHS.getOffset() != RHS.getOffset())
- return LHS.getOffset() < RHS.getOffset();
- return LHS.getLength() < RHS.getLength();
- });
- std::vector<Range> Result;
- for (const auto &R : Ranges) {
- if (Result.empty() ||
- Result.back().getOffset() + Result.back().getLength() < R.getOffset()) {
- Result.push_back(R);
- } else {
- unsigned NewEnd =
- std::max(Result.back().getOffset() + Result.back().getLength(),
- R.getOffset() + R.getLength());
- Result[Result.size() - 1] =
- Range(Result.back().getOffset(), NewEnd - Result.back().getOffset());
+ // This replacement cannot conflict with replacements that end before
+ // this replacement starts or start after this replacement ends.
+ // We also know that there currently are no overlapping replacements.
+ // Thus, we know that all replacements that start after the end of the current
+ // replacement cannot overlap.
+ Replacement AtEnd(R.getFilePath(), R.getOffset() + R.getLength(), 0, "");
+
+ // Find the first entry that starts after or at the end of R. Note that
+ // entries that start at the end can still be conflicting if R is an
+ // insertion.
+ auto I = Replaces.lower_bound(AtEnd);
+ // If `I` starts at the same offset as `R`, `R` must be an insertion.
+ if (I != Replaces.end() && R.getOffset() == I->getOffset()) {
+ assert(R.getLength() == 0);
+ // `I` is also an insertion, `R` and `I` conflict.
+ if (I->getLength() == 0) {
+ // Check if two insertions are order-independent: if inserting them in
+ // either order produces the same text, they are order-independent.
+ if ((R.getReplacementText() + I->getReplacementText()).str() !=
+ (I->getReplacementText() + R.getReplacementText()).str())
+ return llvm::make_error<ReplacementError>(
+ replacement_error::insert_conflict, R, *I);
+ // If insertions are order-independent, we can merge them.
+ Replacement NewR(
+ R.getFilePath(), R.getOffset(), 0,
+ (R.getReplacementText() + I->getReplacementText()).str());
+ Replaces.erase(I);
+ Replaces.insert(std::move(NewR));
+ return llvm::Error::success();
}
+ // Insertion `R` is adjacent to a non-insertion replacement `I`, so they
+ // are order-independent. It is safe to assume that `R` will not conflict
+ // with any replacement before `I` since all replacements before `I` must
+ // either end before `R` or end at `R` but has length > 0 (if the
+ // replacement before `I` is an insertion at `R`, it would have been `I`
+ // since it is a lower bound of `AtEnd` and ordered before the current `I`
+ // in the set).
+ Replaces.insert(R);
+ return llvm::Error::success();
}
- return Result;
-}
-std::vector<Range> calculateChangedRanges(const Replacements &Replaces) {
- std::vector<Range> ChangedRanges;
- int Shift = 0;
- for (const Replacement &R : Replaces) {
- unsigned Offset = R.getOffset() + Shift;
- unsigned Length = R.getReplacementText().size();
- Shift += Length - R.getLength();
- ChangedRanges.push_back(Range(Offset, Length));
+ // `I` is the smallest iterator (after `R`) whose entry cannot overlap.
+ // If that is begin(), there are no overlaps.
+ if (I == Replaces.begin()) {
+ Replaces.insert(R);
+ return llvm::Error::success();
}
- return mergeAndSortRanges(ChangedRanges);
-}
-
-std::vector<Range>
-calculateRangesAfterReplacements(const Replacements &Replaces,
- const std::vector<Range> &Ranges) {
- auto MergedRanges = mergeAndSortRanges(Ranges);
- tooling::Replacements FakeReplaces;
- for (const auto &R : MergedRanges)
- FakeReplaces.insert(Replacement(Replaces.begin()->getFilePath(),
- R.getOffset(), R.getLength(),
- std::string(R.getLength(), ' ')));
- tooling::Replacements NewReplaces = mergeReplacements(FakeReplaces, Replaces);
- return calculateChangedRanges(NewReplaces);
+ --I;
+ auto Overlap = [](const Replacement &R1, const Replacement &R2) -> bool {
+ return Range(R1.getOffset(), R1.getLength())
+ .overlapsWith(Range(R2.getOffset(), R2.getLength()));
+ };
+ // If the previous entry does not overlap, we know that entries before it
+ // can also not overlap.
+ if (!Overlap(R, *I)) {
+ // If `R` and `I` do not have the same offset, it is safe to add `R` since
+ // it must come after `I`. Otherwise:
+ // - If `R` is an insertion, `I` must not be an insertion since it would
+ // have come after `AtEnd`.
+ // - If `R` is not an insertion, `I` must be an insertion; otherwise, `R`
+ // and `I` would have overlapped.
+ // In either case, we can safely insert `R`.
+ Replaces.insert(R);
+ } else {
+ // `I` overlaps with `R`. We need to check `R` against all overlapping
+ // replacements to see if they are order-independent. If they are, merge `R`
+ // with them and replace them with the merged replacements.
+ auto MergeBegin = I;
+ auto MergeEnd = std::next(I);
+ while (I != Replaces.begin()) {
+ --I;
+ // If `I` doesn't overlap with `R`, don't merge it.
+ if (!Overlap(R, *I))
+ break;
+ MergeBegin = I;
+ }
+ Replacements OverlapReplaces(MergeBegin, MergeEnd);
+ llvm::Expected<Replacements> Merged =
+ OverlapReplaces.mergeIfOrderIndependent(R);
+ if (!Merged)
+ return Merged.takeError();
+ Replaces.erase(MergeBegin, MergeEnd);
+ Replaces.insert(Merged->begin(), Merged->end());
+ }
+ return llvm::Error::success();
}
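
A worked example of the insertion-merging rule (sketch; file name assumed):
two insertions at the same offset are order-independent exactly when their
texts commute under concatenation.

    tooling::Replacements Rs;
    // "ab" + "abab" == "abab" + "ab" == "ababab": merged successfully.
    llvm::consumeError(Rs.add(tooling::Replacement("a.cc", 0, 0, "ab")));
    llvm::consumeError(Rs.add(tooling::Replacement("a.cc", 0, 0, "abab")));
    // "ab" + "ba" != "ba" + "ab": add() would return insert_conflict instead.
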
namespace {
+
// Represents a merged replacement, i.e. a replacement consisting of multiple
// overlapping replacements from 'First' and 'Second' in mergeReplacements.
//
@@ -425,26 +418,19 @@ private:
unsigned Length;
std::string Text;
};
-} // namespace
-std::map<std::string, Replacements>
-groupReplacementsByFile(const Replacements &Replaces) {
- std::map<std::string, Replacements> FileToReplaces;
- for (const auto &Replace : Replaces) {
- FileToReplaces[Replace.getFilePath()].insert(Replace);
- }
- return FileToReplaces;
-}
+} // namespace
-Replacements mergeReplacements(const Replacements &First,
- const Replacements &Second) {
- if (First.empty() || Second.empty())
- return First.empty() ? Second : First;
+Replacements Replacements::merge(const Replacements &ReplacesToMerge) const {
+ if (empty() || ReplacesToMerge.empty())
+ return empty() ? ReplacesToMerge : *this;
+ auto &First = Replaces;
+ auto &Second = ReplacesToMerge.Replaces;
// Delta is the amount of characters that replacements from 'Second' need to
// be shifted so that their offsets refer to the original text.
int Delta = 0;
- Replacements Result;
+ ReplacementsImpl Result;
// Iterate over both sets and always add the next element (smallest total
// Offset) from either 'First' or 'Second'. Merge that element with
@@ -470,6 +456,143 @@ Replacements mergeReplacements(const Replacements &First,
Delta -= Merged.deltaFirst();
Result.insert(Merged.asReplacement());
}
+ return Replacements(Result.begin(), Result.end());
+}
+
+// Combines overlapping ranges in \p Ranges and sorts the combined ranges.
+// Returns a set of non-overlapping and sorted ranges that is equivalent to
+// \p Ranges.
+static std::vector<Range> combineAndSortRanges(std::vector<Range> Ranges) {
+ std::sort(Ranges.begin(), Ranges.end(),
+ [](const Range &LHS, const Range &RHS) {
+ if (LHS.getOffset() != RHS.getOffset())
+ return LHS.getOffset() < RHS.getOffset();
+ return LHS.getLength() < RHS.getLength();
+ });
+ std::vector<Range> Result;
+ for (const auto &R : Ranges) {
+ if (Result.empty() ||
+ Result.back().getOffset() + Result.back().getLength() < R.getOffset()) {
+ Result.push_back(R);
+ } else {
+ unsigned NewEnd =
+ std::max(Result.back().getOffset() + Result.back().getLength(),
+ R.getOffset() + R.getLength());
+ Result[Result.size() - 1] =
+ Range(Result.back().getOffset(), NewEnd - Result.back().getOffset());
+ }
+ }
+ return Result;
+}
+
+std::vector<Range>
+calculateRangesAfterReplacements(const Replacements &Replaces,
+ const std::vector<Range> &Ranges) {
+ // To calculate the new ranges,
+ // - Turn \p Ranges into Replacements at (offset, length) with a dummy
+ // (unimportant) replacement text of length "length".
+ // - Merge with \p Replaces.
+ // - The new ranges will be the affected ranges of the merged replacements.
+ auto MergedRanges = combineAndSortRanges(Ranges);
+ if (Replaces.empty())
+ return MergedRanges;
+ tooling::Replacements FakeReplaces;
+ for (const auto &R : MergedRanges) {
+ auto Err = FakeReplaces.add(Replacement(Replaces.begin()->getFilePath(),
+ R.getOffset(), R.getLength(),
+ std::string(R.getLength(), ' ')));
+ assert(!Err &&
+ "Replacements must not conflict since ranges have been merged.");
+ (void)Err;
+ }
+ return FakeReplaces.merge(Replaces).getAffectedRanges();
+}
+
+std::vector<Range> Replacements::getAffectedRanges() const {
+ std::vector<Range> ChangedRanges;
+ int Shift = 0;
+ for (const Replacement &R : Replaces) {
+ unsigned Offset = R.getOffset() + Shift;
+ unsigned Length = R.getReplacementText().size();
+ Shift += Length - R.getLength();
+ ChangedRanges.push_back(Range(Offset, Length));
+ }
+ return combineAndSortRanges(ChangedRanges);
+}
+
+unsigned Replacements::getShiftedCodePosition(unsigned Position) const {
+ unsigned Offset = 0;
+ for (const auto& R : Replaces) {
+ if (R.getOffset() + R.getLength() <= Position) {
+ Offset += R.getReplacementText().size() - R.getLength();
+ continue;
+ }
+ if (R.getOffset() < Position &&
+ R.getOffset() + R.getReplacementText().size() <= Position) {
+ Position = R.getOffset() + R.getReplacementText().size();
+ if (R.getReplacementText().size() > 0)
+ Position--;
+ }
+ break;
+ }
+ return Position + Offset;
+}
+
+bool applyAllReplacements(const Replacements &Replaces, Rewriter &Rewrite) {
+ bool Result = true;
+ for (auto I = Replaces.rbegin(), E = Replaces.rend(); I != E; ++I) {
+ if (I->isApplicable()) {
+ Result = I->apply(Rewrite) && Result;
+ } else {
+ Result = false;
+ }
+ }
+ return Result;
+}
+
+llvm::Expected<std::string> applyAllReplacements(StringRef Code,
+ const Replacements &Replaces) {
+ if (Replaces.empty())
+ return Code.str();
+
+ IntrusiveRefCntPtr<vfs::InMemoryFileSystem> InMemoryFileSystem(
+ new vfs::InMemoryFileSystem);
+ FileManager Files(FileSystemOptions(), InMemoryFileSystem);
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs),
+ new DiagnosticOptions);
+ SourceManager SourceMgr(Diagnostics, Files);
+ Rewriter Rewrite(SourceMgr, LangOptions());
+ InMemoryFileSystem->addFile(
+ "<stdin>", 0, llvm::MemoryBuffer::getMemBuffer(Code, "<stdin>"));
+ FileID ID = SourceMgr.createFileID(Files.getFile("<stdin>"), SourceLocation(),
+ clang::SrcMgr::C_User);
+ for (auto I = Replaces.rbegin(), E = Replaces.rend(); I != E; ++I) {
+ Replacement Replace("<stdin>", I->getOffset(), I->getLength(),
+ I->getReplacementText());
+ if (!Replace.apply(Rewrite))
+ return llvm::make_error<ReplacementError>(
+ replacement_error::fail_to_apply, Replace);
+ }
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ Rewrite.getEditBuffer(ID).write(OS);
+ OS.flush();
+ return Result;
+}
+
+std::map<std::string, Replacements> groupReplacementsByFile(
+ FileManager &FileMgr,
+ const std::map<std::string, Replacements> &FileToReplaces) {
+ std::map<std::string, Replacements> Result;
+ llvm::SmallPtrSet<const FileEntry *, 16> ProcessedFileEntries;
+ for (const auto &Entry : FileToReplaces) {
+ const FileEntry *FE = FileMgr.getFile(Entry.first);
+ if (!FE)
+ llvm::errs() << "File path " << Entry.first << " is invalid.\n";
+ else if (ProcessedFileEntries.insert(FE).second)
+ Result[Entry.first] = Entry.second;
+ }
return Result;
}
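
Usage sketch for the reworked offset mapping (assumed file, with content
"int x;"):

    // Replace "int" (offset 0, length 3) with "unsigned int".
    tooling::Replacements Rs(
        tooling::Replacement("a.cc", 0, 3, "unsigned int"));
    unsigned NewPos = Rs.getShiftedCodePosition(4);  // 'x' moves from 4 to 13
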
diff --git a/lib/Tooling/JSONCompilationDatabase.cpp b/lib/Tooling/JSONCompilationDatabase.cpp
index 299fbdc149bf..738e610ed946 100644
--- a/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/lib/Tooling/JSONCompilationDatabase.cpp
@@ -16,7 +16,10 @@
#include "clang/Tooling/CompilationDatabasePluginRegistry.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/StringSaver.h"
#include <system_error>
namespace clang {
@@ -111,8 +114,29 @@ class CommandLineArgumentParser {
std::vector<std::string> CommandLine;
};
-std::vector<std::string> unescapeCommandLine(
- StringRef EscapedCommandLine) {
+std::vector<std::string> unescapeCommandLine(JSONCommandLineSyntax Syntax,
+ StringRef EscapedCommandLine) {
+ if (Syntax == JSONCommandLineSyntax::AutoDetect) {
+ Syntax = JSONCommandLineSyntax::Gnu;
+ llvm::Triple Triple(llvm::sys::getProcessTriple());
+ if (Triple.getOS() == llvm::Triple::OSType::Win32) {
+ // Assume Windows command line parsing on Win32 unless the triple
+ // explicitly tells us otherwise.
+ if (!Triple.hasEnvironment() ||
+ Triple.getEnvironment() == llvm::Triple::EnvironmentType::MSVC)
+ Syntax = JSONCommandLineSyntax::Windows;
+ }
+ }
+
+ if (Syntax == JSONCommandLineSyntax::Windows) {
+ llvm::BumpPtrAllocator Alloc;
+ llvm::StringSaver Saver(Alloc);
+ llvm::SmallVector<const char *, 64> T;
+ llvm::cl::TokenizeWindowsCommandLine(EscapedCommandLine, Saver, T);
+ std::vector<std::string> Result(T.begin(), T.end());
+ return Result;
+ }
+ assert(Syntax == JSONCommandLineSyntax::Gnu);
CommandLineArgumentParser parser(EscapedCommandLine);
return parser.parse();
}
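
Illustration of the two syntaxes (sketch): the same escaped command tokenizes
differently, mainly around backslashes.

    // Windows syntax: backslashes in paths are literal characters.
    //   "clang-cl /c C:\src\a.cc" -> {"clang-cl", "/c", "C:\src\a.cc"}
    // GNU syntax: a backslash escapes the next character, so the same
    //   command would tokenize to {"clang-cl", "/c", "C:srca.cc"}.
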
@@ -123,7 +147,8 @@ class JSONCompilationDatabasePlugin : public CompilationDatabasePlugin {
SmallString<1024> JSONDatabasePath(Directory);
llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
std::unique_ptr<CompilationDatabase> Database(
- JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
+ JSONCompilationDatabase::loadFromFile(
+ JSONDatabasePath, ErrorMessage, JSONCommandLineSyntax::AutoDetect));
if (!Database)
return nullptr;
return Database;
@@ -143,7 +168,8 @@ volatile int JSONAnchorSource = 0;
std::unique_ptr<JSONCompilationDatabase>
JSONCompilationDatabase::loadFromFile(StringRef FilePath,
- std::string &ErrorMessage) {
+ std::string &ErrorMessage,
+ JSONCommandLineSyntax Syntax) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> DatabaseBuffer =
llvm::MemoryBuffer::getFile(FilePath);
if (std::error_code Result = DatabaseBuffer.getError()) {
@@ -151,7 +177,7 @@ JSONCompilationDatabase::loadFromFile(StringRef FilePath,
return nullptr;
}
std::unique_ptr<JSONCompilationDatabase> Database(
- new JSONCompilationDatabase(std::move(*DatabaseBuffer)));
+ new JSONCompilationDatabase(std::move(*DatabaseBuffer), Syntax));
if (!Database->parse(ErrorMessage))
return nullptr;
return Database;
@@ -159,11 +185,12 @@ JSONCompilationDatabase::loadFromFile(StringRef FilePath,
std::unique_ptr<JSONCompilationDatabase>
JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
- std::string &ErrorMessage) {
+ std::string &ErrorMessage,
+ JSONCommandLineSyntax Syntax) {
std::unique_ptr<llvm::MemoryBuffer> DatabaseBuffer(
llvm::MemoryBuffer::getMemBuffer(DatabaseString));
std::unique_ptr<JSONCompilationDatabase> Database(
- new JSONCompilationDatabase(std::move(DatabaseBuffer)));
+ new JSONCompilationDatabase(std::move(DatabaseBuffer), Syntax));
if (!Database->parse(ErrorMessage))
return nullptr;
return Database;
@@ -211,10 +238,11 @@ JSONCompilationDatabase::getAllCompileCommands() const {
}
static std::vector<std::string>
-nodeToCommandLine(const std::vector<llvm::yaml::ScalarNode *> &Nodes) {
+nodeToCommandLine(JSONCommandLineSyntax Syntax,
+ const std::vector<llvm::yaml::ScalarNode *> &Nodes) {
SmallString<1024> Storage;
if (Nodes.size() == 1) {
- return unescapeCommandLine(Nodes[0]->getValue(Storage));
+ return unescapeCommandLine(Syntax, Nodes[0]->getValue(Storage));
}
std::vector<std::string> Arguments;
for (auto *Node : Nodes) {
@@ -229,10 +257,13 @@ void JSONCompilationDatabase::getCommands(
for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
SmallString<8> DirectoryStorage;
SmallString<32> FilenameStorage;
+ SmallString<32> OutputStorage;
+ auto Output = std::get<3>(CommandsRef[I]);
Commands.emplace_back(
- std::get<0>(CommandsRef[I])->getValue(DirectoryStorage),
- std::get<1>(CommandsRef[I])->getValue(FilenameStorage),
- nodeToCommandLine(std::get<2>(CommandsRef[I])));
+ std::get<0>(CommandsRef[I])->getValue(DirectoryStorage),
+ std::get<1>(CommandsRef[I])->getValue(FilenameStorage),
+ nodeToCommandLine(Syntax, std::get<2>(CommandsRef[I])),
+ Output ? Output->getValue(OutputStorage) : "");
}
}
@@ -261,6 +292,7 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
llvm::yaml::ScalarNode *Directory = nullptr;
llvm::Optional<std::vector<llvm::yaml::ScalarNode *>> Command;
llvm::yaml::ScalarNode *File = nullptr;
+ llvm::yaml::ScalarNode *Output = nullptr;
for (auto& NextKeyValue : *Object) {
llvm::yaml::ScalarNode *KeyString =
dyn_cast<llvm::yaml::ScalarNode>(NextKeyValue.getKey());
@@ -303,6 +335,8 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
Command = std::vector<llvm::yaml::ScalarNode *>(1, ValueString);
} else if (KeyValue == "file") {
File = ValueString;
+ } else if (KeyValue == "output") {
+ Output = ValueString;
} else {
ErrorMessage = ("Unknown key: \"" +
KeyString->getRawValue() + "\"").str();
@@ -333,7 +367,7 @@ bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
} else {
llvm::sys::path::native(FileName, NativeFilePath);
}
- auto Cmd = CompileCommandRef(Directory, File, *Command);
+ auto Cmd = CompileCommandRef(Directory, File, *Command, Output);
IndexByFile[NativeFilePath].push_back(Cmd);
AllCommands.push_back(Cmd);
MatchTrie.insert(NativeFilePath);
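Taken together, the JSONCompilationDatabase hunks add an explicit command-line syntax parameter and an optional "output" key per entry. A minimal usage sketch; it assumes CompileCommand grows an Output member to match the four-argument emplace_back above, since the corresponding header change is not shown in this diff:

  #include "clang/Tooling/JSONCompilationDatabase.h"
  #include "llvm/Support/raw_ostream.h"
  #include <memory>
  #include <string>

  using namespace clang::tooling;

  int main() {
    // Hypothetical database contents; "output" is the new optional key.
    const char *JSON = R"([{
      "directory": "/tmp",
      "command": "clang++ -c foo.cpp -o foo.o",
      "file": "foo.cpp",
      "output": "foo.o"
    }])";

    std::string Err;
    std::unique_ptr<JSONCompilationDatabase> DB =
        JSONCompilationDatabase::loadFromBuffer(JSON, Err,
                                                JSONCommandLineSyntax::Gnu);
    if (!DB) {
      llvm::errs() << Err << "\n";
      return 1;
    }
    for (const CompileCommand &CC : DB->getAllCompileCommands())
      llvm::outs() << CC.Filename << " -> " << CC.Output << "\n";
  }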
diff --git a/lib/Tooling/Refactoring.cpp b/lib/Tooling/Refactoring.cpp
index 28d535aeb45f..308c1ac48b28 100644
--- a/lib/Tooling/Refactoring.cpp
+++ b/lib/Tooling/Refactoring.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Tooling/Refactoring.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
@@ -18,8 +19,6 @@
#include "clang/Frontend/TextDiagnosticPrinter.h"
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/Rewriter.h"
-#include "clang/Tooling/Refactoring.h"
-#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_os_ostream.h"
@@ -31,7 +30,9 @@ RefactoringTool::RefactoringTool(
std::shared_ptr<PCHContainerOperations> PCHContainerOps)
: ClangTool(Compilations, SourcePaths, PCHContainerOps) {}
-Replacements &RefactoringTool::getReplacements() { return Replace; }
+std::map<std::string, Replacements> &RefactoringTool::getReplacements() {
+ return FileToReplaces;
+}
int RefactoringTool::runAndSave(FrontendActionFactory *ActionFactory) {
if (int Result = run(ActionFactory)) {
@@ -55,22 +56,26 @@ int RefactoringTool::runAndSave(FrontendActionFactory *ActionFactory) {
}
bool RefactoringTool::applyAllReplacements(Rewriter &Rewrite) {
- return tooling::applyAllReplacements(Replace, Rewrite);
+ bool Result = true;
+ for (const auto &Entry : groupReplacementsByFile(
+ Rewrite.getSourceMgr().getFileManager(), FileToReplaces))
+ Result = tooling::applyAllReplacements(Entry.second, Rewrite) && Result;
+ return Result;
}
int RefactoringTool::saveRewrittenFiles(Rewriter &Rewrite) {
return Rewrite.overwriteChangedFiles() ? 1 : 0;
}
-bool formatAndApplyAllReplacements(const Replacements &Replaces,
- Rewriter &Rewrite, StringRef Style) {
+bool formatAndApplyAllReplacements(
+ const std::map<std::string, Replacements> &FileToReplaces, Rewriter &Rewrite,
+ StringRef Style) {
SourceManager &SM = Rewrite.getSourceMgr();
FileManager &Files = SM.getFileManager();
- auto FileToReplaces = groupReplacementsByFile(Replaces);
-
bool Result = true;
- for (const auto &FileAndReplaces : FileToReplaces) {
+ for (const auto &FileAndReplaces : groupReplacementsByFile(
+ Rewrite.getSourceMgr().getFileManager(), FileToReplaces)) {
const std::string &FilePath = FileAndReplaces.first;
auto &CurReplaces = FileAndReplaces.second;
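The getReplacements() change is the user-visible part of this file: tools now bucket replacements by target file path instead of sharing one global set. A minimal sketch of the new call pattern, with a hypothetical helper and file path:

  #include "clang/Tooling/Refactoring.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace clang::tooling;

  // Hypothetical helper; Tool is an already-constructed RefactoringTool.
  void addFix(RefactoringTool &Tool) {
    // Key into the per-file map first, then add() to that file's set.
    Replacements &FileReplaces = Tool.getReplacements()["/tmp/foo.cpp"];
    // Replace 5 bytes at offset 0 of /tmp/foo.cpp with "bar".
    if (llvm::Error Err =
            FileReplaces.add(Replacement("/tmp/foo.cpp", 0, 5, "bar")))
      llvm::errs() << llvm::toString(std::move(Err)) << "\n";
  }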
diff --git a/lib/Tooling/RefactoringCallbacks.cpp b/lib/Tooling/RefactoringCallbacks.cpp
index 4de125ec02aa..e900c23e4f64 100644
--- a/lib/Tooling/RefactoringCallbacks.cpp
+++ b/lib/Tooling/RefactoringCallbacks.cpp
@@ -39,11 +39,16 @@ ReplaceStmtWithText::ReplaceStmtWithText(StringRef FromId, StringRef ToText)
void ReplaceStmtWithText::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
- if (const Stmt *FromMatch = Result.Nodes.getStmtAs<Stmt>(FromId)) {
- Replace.insert(tooling::Replacement(
+ if (const Stmt *FromMatch = Result.Nodes.getNodeAs<Stmt>(FromId)) {
+ auto Err = Replace.add(tooling::Replacement(
*Result.SourceManager,
- CharSourceRange::getTokenRange(FromMatch->getSourceRange()),
- ToText));
+ CharSourceRange::getTokenRange(FromMatch->getSourceRange()), ToText));
+ // FIXME: better error handling. For now, just print the error message in
+ // release builds (where the assert below compiles away).
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
}
}
@@ -52,11 +57,18 @@ ReplaceStmtWithStmt::ReplaceStmtWithStmt(StringRef FromId, StringRef ToId)
void ReplaceStmtWithStmt::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
- const Stmt *FromMatch = Result.Nodes.getStmtAs<Stmt>(FromId);
- const Stmt *ToMatch = Result.Nodes.getStmtAs<Stmt>(ToId);
- if (FromMatch && ToMatch)
- Replace.insert(replaceStmtWithStmt(
- *Result.SourceManager, *FromMatch, *ToMatch));
+ const Stmt *FromMatch = Result.Nodes.getNodeAs<Stmt>(FromId);
+ const Stmt *ToMatch = Result.Nodes.getNodeAs<Stmt>(ToId);
+ if (FromMatch && ToMatch) {
+ auto Err = Replace.add(
+ replaceStmtWithStmt(*Result.SourceManager, *FromMatch, *ToMatch));
+ // FIXME: better error handling. For now, just print the error message in
+ // release builds (where the assert below compiles away).
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
+ }
}
ReplaceIfStmtWithItsBody::ReplaceIfStmtWithItsBody(StringRef Id,
@@ -65,14 +77,28 @@ ReplaceIfStmtWithItsBody::ReplaceIfStmtWithItsBody(StringRef Id,
void ReplaceIfStmtWithItsBody::run(
const ast_matchers::MatchFinder::MatchResult &Result) {
- if (const IfStmt *Node = Result.Nodes.getStmtAs<IfStmt>(Id)) {
+ if (const IfStmt *Node = Result.Nodes.getNodeAs<IfStmt>(Id)) {
const Stmt *Body = PickTrueBranch ? Node->getThen() : Node->getElse();
if (Body) {
- Replace.insert(replaceStmtWithStmt(*Result.SourceManager, *Node, *Body));
+ auto Err =
+ Replace.add(replaceStmtWithStmt(*Result.SourceManager, *Node, *Body));
+ // FIXME: better error handling. For now, just print the error message in
+ // release builds (where the assert below compiles away).
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
} else if (!PickTrueBranch) {
// If we want to use the 'else'-branch, but it doesn't exist, delete
// the whole 'if'.
- Replace.insert(replaceStmtWithText(*Result.SourceManager, *Node, ""));
+ auto Err =
+ Replace.add(replaceStmtWithText(*Result.SourceManager, *Node, ""));
+ // FIXME: better error handling. For now, just print the error message in
+ // release builds (where the assert below compiles away).
+ if (Err) {
+ llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ assert(false);
+ }
}
}
}
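All three callbacks now funnel through the same pattern because Replacements::add(), unlike insert() on the old set-based container, reports conflicts through llvm::Error, and an unhandled llvm::Error aborts at runtime. A minimal sketch of such a conflict, assuming add() rejects overlapping ranges (the failure mode these FIXME blocks guard against):

  #include "clang/Tooling/Core/Replacement.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace clang::tooling;

  int main() {
    Replacements Replaces;
    if (llvm::Error Err = Replaces.add(Replacement("foo.cpp", 0, 4, "int")))
      llvm::errs() << llvm::toString(std::move(Err)) << "\n"; // not expected
    // Overlaps [0, 4), so this add() returns a descriptive error instead
    // of silently accumulating conflicting edits.
    if (llvm::Error Err = Replaces.add(Replacement("foo.cpp", 2, 4, "long")))
      llvm::errs() << llvm::toString(std::move(Err)) << "\n";
  }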
diff --git a/lib/Tooling/Tooling.cpp b/lib/Tooling/Tooling.cpp
index 4c7fed1e617c..529c47ef1e7a 100644
--- a/lib/Tooling/Tooling.cpp
+++ b/lib/Tooling/Tooling.cpp
@@ -13,23 +13,26 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/Tooling.h"
-#include "clang/AST/ASTConsumer.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Tooling/ArgumentsAdjusters.h"
#include "clang/Tooling/CompilationDatabase.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Option/Option.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
#include <utility>
@@ -240,6 +243,11 @@ bool ToolInvocation::run() {
Argv.push_back(Str.c_str());
const char *const BinaryName = Argv[0];
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ unsigned MissingArgIndex, MissingArgCount;
+ std::unique_ptr<llvm::opt::OptTable> Opts(driver::createDriverOptTable());
+ llvm::opt::InputArgList ParsedArgs = Opts->ParseArgs(
+ ArrayRef<const char *>(Argv).slice(1), MissingArgIndex, MissingArgCount);
+ ParseDiagnosticArgs(*DiagOpts, ParsedArgs);
TextDiagnosticPrinter DiagnosticPrinter(
llvm::errs(), &*DiagOpts);
DiagnosticsEngine Diagnostics(